Refactor requantization in scalar QS8/QC8/QU8 microkernels
- Rename MAGIC variant to FMAGIC (floating-point min/max + magic bias) and
LRINT variant to LRINTF (rounding via the lrintf function)
- Avoid undefined behaviour in LRINT-variant microkernels
- Remove scalar microkernels with RNDNU requantization as they don't properly
handle requantization scale greater than 1.0
PiperOrigin-RevId: 419542667
diff --git a/BUILD.bazel b/BUILD.bazel
index 4bae2b1..a1ebb64 100644
--- a/BUILD.bazel
+++ b/BUILD.bazel
@@ -216,36 +216,36 @@
"src/f32-vunary/gen/vabs-scalar-x4.c",
"src/f32-vunary/gen/vneg-scalar-x4.c",
"src/f32-vunary/gen/vsqr-scalar-x4.c",
- "src/qc8-dwconv/gen/up2x9-minmax-fp32-scalar-magic.c",
- "src/qc8-dwconv/gen/up2x25-minmax-fp32-scalar-magic.c",
- "src/qc8-gemm/gen/1x2-minmax-fp32-scalar-magic.c",
- "src/qc8-gemm/gen/2x2-minmax-fp32-scalar-magic.c",
- "src/qc8-igemm/gen/1x2-minmax-fp32-scalar-magic.c",
- "src/qc8-igemm/gen/2x2-minmax-fp32-scalar-magic.c",
- "src/qs8-dwconv/gen/up1x9-minmax-fp32-scalar-magic.c",
- "src/qs8-dwconv/gen/up1x25-minmax-fp32-scalar-magic.c",
+ "src/qc8-dwconv/gen/up2x9-minmax-fp32-scalar-fmagic.c",
+ "src/qc8-dwconv/gen/up2x25-minmax-fp32-scalar-fmagic.c",
+ "src/qc8-gemm/gen/1x2-minmax-fp32-scalar-fmagic.c",
+ "src/qc8-gemm/gen/2x2-minmax-fp32-scalar-fmagic.c",
+ "src/qc8-igemm/gen/1x2-minmax-fp32-scalar-fmagic.c",
+ "src/qc8-igemm/gen/2x2-minmax-fp32-scalar-fmagic.c",
+ "src/qs8-dwconv/gen/up1x9-minmax-fp32-scalar-fmagic.c",
+ "src/qs8-dwconv/gen/up1x25-minmax-fp32-scalar-fmagic.c",
"src/qs8-f32-vcvt/gen/vcvt-scalar-x4.c",
"src/qs8-gavgpool/gen/7p7x-minmax-scalar-c1.c",
"src/qs8-gavgpool/gen/7x-minmax-scalar-c1.c",
- "src/qs8-gemm/gen/1x2-minmax-fp32-scalar-magic.c",
- "src/qs8-gemm/gen/2x2-minmax-fp32-scalar-magic.c",
- "src/qs8-igemm/gen/1x2-minmax-fp32-scalar-magic.c",
- "src/qs8-igemm/gen/2x2-minmax-fp32-scalar-magic.c",
+ "src/qs8-gemm/gen/1x2-minmax-fp32-scalar-fmagic.c",
+ "src/qs8-gemm/gen/2x2-minmax-fp32-scalar-fmagic.c",
+ "src/qs8-igemm/gen/1x2-minmax-fp32-scalar-fmagic.c",
+ "src/qs8-igemm/gen/2x2-minmax-fp32-scalar-fmagic.c",
"src/qs8-vadd/gen/minmax-scalar-x1.c",
"src/qs8-vaddc/gen/minmax-scalar-x1.c",
"src/qs8-vmul/gen/minmax-fp32-scalar-x4.c",
"src/qs8-vmulc/gen/minmax-fp32-scalar-x4.c",
"src/qu8-avgpool/9p8x-minmax-scalar-c1.c",
"src/qu8-avgpool/9x-minmax-scalar-c1.c",
- "src/qu8-dwconv/gen/up1x9-minmax-fp32-scalar-magic.c",
- "src/qu8-dwconv/gen/up1x25-minmax-fp32-scalar-magic.c",
+ "src/qu8-dwconv/gen/up1x9-minmax-fp32-scalar-fmagic.c",
+ "src/qu8-dwconv/gen/up1x25-minmax-fp32-scalar-fmagic.c",
"src/qu8-f32-vcvt/gen/vcvt-scalar-x4.c",
"src/qu8-gavgpool/7p7x-minmax-scalar-c1.c",
"src/qu8-gavgpool/7x-minmax-scalar-c1.c",
- "src/qu8-gemm/gen/1x2-minmax-fp32-scalar-magic.c",
- "src/qu8-gemm/gen/2x2-minmax-fp32-scalar-magic.c",
- "src/qu8-igemm/gen/1x2-minmax-fp32-scalar-magic.c",
- "src/qu8-igemm/gen/2x2-minmax-fp32-scalar-magic.c",
+ "src/qu8-gemm/gen/1x2-minmax-fp32-scalar-fmagic.c",
+ "src/qu8-gemm/gen/2x2-minmax-fp32-scalar-fmagic.c",
+ "src/qu8-igemm/gen/1x2-minmax-fp32-scalar-fmagic.c",
+ "src/qu8-igemm/gen/2x2-minmax-fp32-scalar-fmagic.c",
"src/qu8-vadd/gen/minmax-scalar-x1.c",
"src/qu8-vaddc/gen/minmax-scalar-x1.c",
"src/qu8-vmul/gen/minmax-fp32-scalar-x4.c",
@@ -349,48 +349,48 @@
"src/f32-vunary/gen/vabs-scalar-x4.c",
"src/f32-vunary/gen/vneg-scalar-x4.c",
"src/f32-vunary/gen/vsqr-scalar-x4.c",
- "src/qc8-dwconv/gen/up2x9-minmax-fp32-scalar-magic.c",
- "src/qc8-dwconv/gen/up2x25-minmax-fp32-scalar-magic.c",
- "src/qc8-gemm/gen/1x2-minmax-fp32-scalar-magic.c",
- "src/qc8-gemm/gen/1x4-minmax-fp32-scalar-magic.c",
- "src/qc8-gemm/gen/2x2-minmax-fp32-scalar-magic.c",
- "src/qc8-gemm/gen/4x4-minmax-fp32-scalar-magic.c",
- "src/qc8-igemm/gen/1x2-minmax-fp32-scalar-magic.c",
- "src/qc8-igemm/gen/1x4-minmax-fp32-scalar-magic.c",
- "src/qc8-igemm/gen/2x2-minmax-fp32-scalar-magic.c",
- "src/qc8-igemm/gen/4x4-minmax-fp32-scalar-magic.c",
- "src/qs8-dwconv/gen/up2x9-minmax-fp32-scalar-magic.c",
- "src/qs8-dwconv/gen/up2x25-minmax-fp32-scalar-magic.c",
+ "src/qc8-dwconv/gen/up2x9-minmax-fp32-scalar-fmagic.c",
+ "src/qc8-dwconv/gen/up2x25-minmax-fp32-scalar-fmagic.c",
+ "src/qc8-gemm/gen/1x2-minmax-fp32-scalar-fmagic.c",
+ "src/qc8-gemm/gen/1x4-minmax-fp32-scalar-fmagic.c",
+ "src/qc8-gemm/gen/2x2-minmax-fp32-scalar-fmagic.c",
+ "src/qc8-gemm/gen/4x4-minmax-fp32-scalar-fmagic.c",
+ "src/qc8-igemm/gen/1x2-minmax-fp32-scalar-fmagic.c",
+ "src/qc8-igemm/gen/1x4-minmax-fp32-scalar-fmagic.c",
+ "src/qc8-igemm/gen/2x2-minmax-fp32-scalar-fmagic.c",
+ "src/qc8-igemm/gen/4x4-minmax-fp32-scalar-fmagic.c",
+ "src/qs8-dwconv/gen/up2x9-minmax-fp32-scalar-fmagic.c",
+ "src/qs8-dwconv/gen/up2x25-minmax-fp32-scalar-fmagic.c",
"src/qs8-f32-vcvt/gen/vcvt-scalar-x1.c",
"src/qs8-gavgpool/gen/7p7x-minmax-scalar-c4.c",
"src/qs8-gavgpool/gen/7x-minmax-scalar-c4.c",
- "src/qs8-gemm/gen/1x2-minmax-fp32-scalar-magic.c",
- "src/qs8-gemm/gen/1x4-minmax-fp32-scalar-magic.c",
- "src/qs8-gemm/gen/2x2-minmax-fp32-scalar-magic.c",
- "src/qs8-gemm/gen/4x4-minmax-fp32-scalar-magic.c",
- "src/qs8-igemm/gen/1x2-minmax-fp32-scalar-magic.c",
- "src/qs8-igemm/gen/1x4-minmax-fp32-scalar-magic.c",
- "src/qs8-igemm/gen/2x2-minmax-fp32-scalar-magic.c",
- "src/qs8-igemm/gen/4x4-minmax-fp32-scalar-magic.c",
+ "src/qs8-gemm/gen/1x2-minmax-fp32-scalar-fmagic.c",
+ "src/qs8-gemm/gen/1x4-minmax-fp32-scalar-fmagic.c",
+ "src/qs8-gemm/gen/2x2-minmax-fp32-scalar-fmagic.c",
+ "src/qs8-gemm/gen/4x4-minmax-fp32-scalar-fmagic.c",
+ "src/qs8-igemm/gen/1x2-minmax-fp32-scalar-fmagic.c",
+ "src/qs8-igemm/gen/1x4-minmax-fp32-scalar-fmagic.c",
+ "src/qs8-igemm/gen/2x2-minmax-fp32-scalar-fmagic.c",
+ "src/qs8-igemm/gen/4x4-minmax-fp32-scalar-fmagic.c",
"src/qs8-vadd/gen/minmax-scalar-x4.c",
"src/qs8-vaddc/gen/minmax-scalar-x4.c",
"src/qs8-vmul/gen/minmax-fp32-scalar-x4.c",
"src/qs8-vmulc/gen/minmax-fp32-scalar-x4.c",
"src/qu8-avgpool/9p8x-minmax-scalar-c1.c",
"src/qu8-avgpool/9x-minmax-scalar-c1.c",
- "src/qu8-dwconv/gen/up2x9-minmax-fp32-scalar-magic.c",
- "src/qu8-dwconv/gen/up2x25-minmax-fp32-scalar-magic.c",
+ "src/qu8-dwconv/gen/up2x9-minmax-fp32-scalar-fmagic.c",
+ "src/qu8-dwconv/gen/up2x25-minmax-fp32-scalar-fmagic.c",
"src/qu8-f32-vcvt/gen/vcvt-scalar-x1.c",
"src/qu8-gavgpool/7p7x-minmax-scalar-c1.c",
"src/qu8-gavgpool/7x-minmax-scalar-c1.c",
- "src/qu8-gemm/gen/1x2-minmax-fp32-scalar-magic.c",
- "src/qu8-gemm/gen/1x4-minmax-fp32-scalar-magic.c",
- "src/qu8-gemm/gen/2x2-minmax-fp32-scalar-magic.c",
- "src/qu8-gemm/gen/4x4-minmax-fp32-scalar-magic.c",
- "src/qu8-igemm/gen/1x2-minmax-fp32-scalar-magic.c",
- "src/qu8-igemm/gen/1x4-minmax-fp32-scalar-magic.c",
- "src/qu8-igemm/gen/2x2-minmax-fp32-scalar-magic.c",
- "src/qu8-igemm/gen/4x4-minmax-fp32-scalar-magic.c",
+ "src/qu8-gemm/gen/1x2-minmax-fp32-scalar-fmagic.c",
+ "src/qu8-gemm/gen/1x4-minmax-fp32-scalar-fmagic.c",
+ "src/qu8-gemm/gen/2x2-minmax-fp32-scalar-fmagic.c",
+ "src/qu8-gemm/gen/4x4-minmax-fp32-scalar-fmagic.c",
+ "src/qu8-igemm/gen/1x2-minmax-fp32-scalar-fmagic.c",
+ "src/qu8-igemm/gen/1x4-minmax-fp32-scalar-fmagic.c",
+ "src/qu8-igemm/gen/2x2-minmax-fp32-scalar-fmagic.c",
+ "src/qu8-igemm/gen/4x4-minmax-fp32-scalar-fmagic.c",
"src/qu8-vadd/gen/minmax-scalar-x4.c",
"src/qu8-vaddc/gen/minmax-scalar-x4.c",
"src/qu8-vmul/gen/minmax-fp32-scalar-x4.c",
@@ -826,62 +826,62 @@
"src/math/sigmoid-scalar-rr2-lut2048-p1-div.c",
"src/math/sigmoid-scalar-rr2-p5-div.c",
"src/params-init.c",
- "src/qc8-dwconv/gen/up1x9-minmax-fp32-scalar-lrint.c",
- "src/qc8-dwconv/gen/up1x9-minmax-fp32-scalar-magic.c",
- "src/qc8-dwconv/gen/up1x25-minmax-fp32-scalar-lrint.c",
- "src/qc8-dwconv/gen/up1x25-minmax-fp32-scalar-magic.c",
- "src/qc8-dwconv/gen/up2x9-minmax-fp32-scalar-lrint.c",
- "src/qc8-dwconv/gen/up2x9-minmax-fp32-scalar-magic.c",
- "src/qc8-dwconv/gen/up2x25-minmax-fp32-scalar-lrint.c",
- "src/qc8-dwconv/gen/up2x25-minmax-fp32-scalar-magic.c",
- "src/qc8-dwconv/gen/up4x9-minmax-fp32-scalar-lrint.c",
- "src/qc8-dwconv/gen/up4x9-minmax-fp32-scalar-magic.c",
- "src/qc8-dwconv/gen/up4x25-minmax-fp32-scalar-lrint.c",
- "src/qc8-dwconv/gen/up4x25-minmax-fp32-scalar-magic.c",
- "src/qc8-gemm/gen/1x2-minmax-fp32-scalar-lrint.c",
- "src/qc8-gemm/gen/1x2-minmax-fp32-scalar-magic.c",
- "src/qc8-gemm/gen/1x4-minmax-fp32-scalar-lrint.c",
- "src/qc8-gemm/gen/1x4-minmax-fp32-scalar-magic.c",
- "src/qc8-gemm/gen/2x2-minmax-fp32-scalar-lrint.c",
- "src/qc8-gemm/gen/2x2-minmax-fp32-scalar-magic.c",
- "src/qc8-gemm/gen/2x4-minmax-fp32-scalar-lrint.c",
- "src/qc8-gemm/gen/2x4-minmax-fp32-scalar-magic.c",
- "src/qc8-gemm/gen/3x2-minmax-fp32-scalar-lrint.c",
- "src/qc8-gemm/gen/3x2-minmax-fp32-scalar-magic.c",
- "src/qc8-gemm/gen/3x4-minmax-fp32-scalar-lrint.c",
- "src/qc8-gemm/gen/3x4-minmax-fp32-scalar-magic.c",
- "src/qc8-gemm/gen/4x2-minmax-fp32-scalar-lrint.c",
- "src/qc8-gemm/gen/4x2-minmax-fp32-scalar-magic.c",
- "src/qc8-gemm/gen/4x4-minmax-fp32-scalar-lrint.c",
- "src/qc8-gemm/gen/4x4-minmax-fp32-scalar-magic.c",
- "src/qc8-igemm/gen/1x2-minmax-fp32-scalar-lrint.c",
- "src/qc8-igemm/gen/1x2-minmax-fp32-scalar-magic.c",
- "src/qc8-igemm/gen/1x4-minmax-fp32-scalar-lrint.c",
- "src/qc8-igemm/gen/1x4-minmax-fp32-scalar-magic.c",
- "src/qc8-igemm/gen/2x2-minmax-fp32-scalar-lrint.c",
- "src/qc8-igemm/gen/2x2-minmax-fp32-scalar-magic.c",
- "src/qc8-igemm/gen/2x4-minmax-fp32-scalar-lrint.c",
- "src/qc8-igemm/gen/2x4-minmax-fp32-scalar-magic.c",
- "src/qc8-igemm/gen/3x2-minmax-fp32-scalar-lrint.c",
- "src/qc8-igemm/gen/3x2-minmax-fp32-scalar-magic.c",
- "src/qc8-igemm/gen/3x4-minmax-fp32-scalar-lrint.c",
- "src/qc8-igemm/gen/3x4-minmax-fp32-scalar-magic.c",
- "src/qc8-igemm/gen/4x2-minmax-fp32-scalar-lrint.c",
- "src/qc8-igemm/gen/4x2-minmax-fp32-scalar-magic.c",
- "src/qc8-igemm/gen/4x4-minmax-fp32-scalar-lrint.c",
- "src/qc8-igemm/gen/4x4-minmax-fp32-scalar-magic.c",
- "src/qs8-dwconv/gen/up1x9-minmax-fp32-scalar-lrint.c",
- "src/qs8-dwconv/gen/up1x9-minmax-fp32-scalar-magic.c",
- "src/qs8-dwconv/gen/up1x25-minmax-fp32-scalar-lrint.c",
- "src/qs8-dwconv/gen/up1x25-minmax-fp32-scalar-magic.c",
- "src/qs8-dwconv/gen/up2x9-minmax-fp32-scalar-lrint.c",
- "src/qs8-dwconv/gen/up2x9-minmax-fp32-scalar-magic.c",
- "src/qs8-dwconv/gen/up2x25-minmax-fp32-scalar-lrint.c",
- "src/qs8-dwconv/gen/up2x25-minmax-fp32-scalar-magic.c",
- "src/qs8-dwconv/gen/up4x9-minmax-fp32-scalar-lrint.c",
- "src/qs8-dwconv/gen/up4x9-minmax-fp32-scalar-magic.c",
- "src/qs8-dwconv/gen/up4x25-minmax-fp32-scalar-lrint.c",
- "src/qs8-dwconv/gen/up4x25-minmax-fp32-scalar-magic.c",
+ "src/qc8-dwconv/gen/up1x9-minmax-fp32-scalar-lrintf.c",
+ "src/qc8-dwconv/gen/up1x9-minmax-fp32-scalar-fmagic.c",
+ "src/qc8-dwconv/gen/up1x25-minmax-fp32-scalar-lrintf.c",
+ "src/qc8-dwconv/gen/up1x25-minmax-fp32-scalar-fmagic.c",
+ "src/qc8-dwconv/gen/up2x9-minmax-fp32-scalar-lrintf.c",
+ "src/qc8-dwconv/gen/up2x9-minmax-fp32-scalar-fmagic.c",
+ "src/qc8-dwconv/gen/up2x25-minmax-fp32-scalar-lrintf.c",
+ "src/qc8-dwconv/gen/up2x25-minmax-fp32-scalar-fmagic.c",
+ "src/qc8-dwconv/gen/up4x9-minmax-fp32-scalar-lrintf.c",
+ "src/qc8-dwconv/gen/up4x9-minmax-fp32-scalar-fmagic.c",
+ "src/qc8-dwconv/gen/up4x25-minmax-fp32-scalar-lrintf.c",
+ "src/qc8-dwconv/gen/up4x25-minmax-fp32-scalar-fmagic.c",
+ "src/qc8-gemm/gen/1x2-minmax-fp32-scalar-lrintf.c",
+ "src/qc8-gemm/gen/1x2-minmax-fp32-scalar-fmagic.c",
+ "src/qc8-gemm/gen/1x4-minmax-fp32-scalar-lrintf.c",
+ "src/qc8-gemm/gen/1x4-minmax-fp32-scalar-fmagic.c",
+ "src/qc8-gemm/gen/2x2-minmax-fp32-scalar-lrintf.c",
+ "src/qc8-gemm/gen/2x2-minmax-fp32-scalar-fmagic.c",
+ "src/qc8-gemm/gen/2x4-minmax-fp32-scalar-lrintf.c",
+ "src/qc8-gemm/gen/2x4-minmax-fp32-scalar-fmagic.c",
+ "src/qc8-gemm/gen/3x2-minmax-fp32-scalar-lrintf.c",
+ "src/qc8-gemm/gen/3x2-minmax-fp32-scalar-fmagic.c",
+ "src/qc8-gemm/gen/3x4-minmax-fp32-scalar-lrintf.c",
+ "src/qc8-gemm/gen/3x4-minmax-fp32-scalar-fmagic.c",
+ "src/qc8-gemm/gen/4x2-minmax-fp32-scalar-lrintf.c",
+ "src/qc8-gemm/gen/4x2-minmax-fp32-scalar-fmagic.c",
+ "src/qc8-gemm/gen/4x4-minmax-fp32-scalar-lrintf.c",
+ "src/qc8-gemm/gen/4x4-minmax-fp32-scalar-fmagic.c",
+ "src/qc8-igemm/gen/1x2-minmax-fp32-scalar-lrintf.c",
+ "src/qc8-igemm/gen/1x2-minmax-fp32-scalar-fmagic.c",
+ "src/qc8-igemm/gen/1x4-minmax-fp32-scalar-lrintf.c",
+ "src/qc8-igemm/gen/1x4-minmax-fp32-scalar-fmagic.c",
+ "src/qc8-igemm/gen/2x2-minmax-fp32-scalar-lrintf.c",
+ "src/qc8-igemm/gen/2x2-minmax-fp32-scalar-fmagic.c",
+ "src/qc8-igemm/gen/2x4-minmax-fp32-scalar-lrintf.c",
+ "src/qc8-igemm/gen/2x4-minmax-fp32-scalar-fmagic.c",
+ "src/qc8-igemm/gen/3x2-minmax-fp32-scalar-lrintf.c",
+ "src/qc8-igemm/gen/3x2-minmax-fp32-scalar-fmagic.c",
+ "src/qc8-igemm/gen/3x4-minmax-fp32-scalar-lrintf.c",
+ "src/qc8-igemm/gen/3x4-minmax-fp32-scalar-fmagic.c",
+ "src/qc8-igemm/gen/4x2-minmax-fp32-scalar-lrintf.c",
+ "src/qc8-igemm/gen/4x2-minmax-fp32-scalar-fmagic.c",
+ "src/qc8-igemm/gen/4x4-minmax-fp32-scalar-lrintf.c",
+ "src/qc8-igemm/gen/4x4-minmax-fp32-scalar-fmagic.c",
+ "src/qs8-dwconv/gen/up1x9-minmax-fp32-scalar-lrintf.c",
+ "src/qs8-dwconv/gen/up1x9-minmax-fp32-scalar-fmagic.c",
+ "src/qs8-dwconv/gen/up1x25-minmax-fp32-scalar-lrintf.c",
+ "src/qs8-dwconv/gen/up1x25-minmax-fp32-scalar-fmagic.c",
+ "src/qs8-dwconv/gen/up2x9-minmax-fp32-scalar-lrintf.c",
+ "src/qs8-dwconv/gen/up2x9-minmax-fp32-scalar-fmagic.c",
+ "src/qs8-dwconv/gen/up2x25-minmax-fp32-scalar-lrintf.c",
+ "src/qs8-dwconv/gen/up2x25-minmax-fp32-scalar-fmagic.c",
+ "src/qs8-dwconv/gen/up4x9-minmax-fp32-scalar-lrintf.c",
+ "src/qs8-dwconv/gen/up4x9-minmax-fp32-scalar-fmagic.c",
+ "src/qs8-dwconv/gen/up4x25-minmax-fp32-scalar-lrintf.c",
+ "src/qs8-dwconv/gen/up4x25-minmax-fp32-scalar-fmagic.c",
"src/qs8-f32-vcvt/gen/vcvt-scalar-x1.c",
"src/qs8-f32-vcvt/gen/vcvt-scalar-x2.c",
"src/qs8-f32-vcvt/gen/vcvt-scalar-x3.c",
@@ -892,56 +892,40 @@
"src/qs8-gavgpool/gen/7x-minmax-scalar-c1.c",
"src/qs8-gavgpool/gen/7x-minmax-scalar-c2.c",
"src/qs8-gavgpool/gen/7x-minmax-scalar-c4.c",
- "src/qs8-gemm/gen/1x2-minmax-fp32-scalar-lrint.c",
- "src/qs8-gemm/gen/1x2-minmax-fp32-scalar-magic.c",
- "src/qs8-gemm/gen/1x2-minmax-rndnu-scalar.c",
- "src/qs8-gemm/gen/1x4-minmax-fp32-scalar-lrint.c",
- "src/qs8-gemm/gen/1x4-minmax-fp32-scalar-magic.c",
- "src/qs8-gemm/gen/1x4-minmax-rndnu-scalar.c",
- "src/qs8-gemm/gen/2x2-minmax-fp32-scalar-lrint.c",
- "src/qs8-gemm/gen/2x2-minmax-fp32-scalar-magic.c",
- "src/qs8-gemm/gen/2x2-minmax-rndnu-scalar.c",
- "src/qs8-gemm/gen/2x4-minmax-fp32-scalar-lrint.c",
- "src/qs8-gemm/gen/2x4-minmax-fp32-scalar-magic.c",
- "src/qs8-gemm/gen/2x4-minmax-rndnu-scalar.c",
- "src/qs8-gemm/gen/3x2-minmax-fp32-scalar-lrint.c",
- "src/qs8-gemm/gen/3x2-minmax-fp32-scalar-magic.c",
- "src/qs8-gemm/gen/3x2-minmax-rndnu-scalar.c",
- "src/qs8-gemm/gen/3x4-minmax-fp32-scalar-lrint.c",
- "src/qs8-gemm/gen/3x4-minmax-fp32-scalar-magic.c",
- "src/qs8-gemm/gen/3x4-minmax-rndnu-scalar.c",
- "src/qs8-gemm/gen/4x2-minmax-fp32-scalar-lrint.c",
- "src/qs8-gemm/gen/4x2-minmax-fp32-scalar-magic.c",
- "src/qs8-gemm/gen/4x2-minmax-rndnu-scalar.c",
- "src/qs8-gemm/gen/4x4-minmax-fp32-scalar-lrint.c",
- "src/qs8-gemm/gen/4x4-minmax-fp32-scalar-magic.c",
- "src/qs8-gemm/gen/4x4-minmax-rndnu-scalar.c",
- "src/qs8-igemm/gen/1x2-minmax-fp32-scalar-lrint.c",
- "src/qs8-igemm/gen/1x2-minmax-fp32-scalar-magic.c",
- "src/qs8-igemm/gen/1x2-minmax-rndnu-scalar.c",
- "src/qs8-igemm/gen/1x4-minmax-fp32-scalar-lrint.c",
- "src/qs8-igemm/gen/1x4-minmax-fp32-scalar-magic.c",
- "src/qs8-igemm/gen/1x4-minmax-rndnu-scalar.c",
- "src/qs8-igemm/gen/2x2-minmax-fp32-scalar-lrint.c",
- "src/qs8-igemm/gen/2x2-minmax-fp32-scalar-magic.c",
- "src/qs8-igemm/gen/2x2-minmax-rndnu-scalar.c",
- "src/qs8-igemm/gen/2x4-minmax-fp32-scalar-lrint.c",
- "src/qs8-igemm/gen/2x4-minmax-fp32-scalar-magic.c",
- "src/qs8-igemm/gen/2x4-minmax-rndnu-scalar.c",
- "src/qs8-igemm/gen/3x2-minmax-fp32-scalar-lrint.c",
- "src/qs8-igemm/gen/3x2-minmax-fp32-scalar-magic.c",
- "src/qs8-igemm/gen/3x2-minmax-rndnu-scalar.c",
- "src/qs8-igemm/gen/3x4-minmax-fp32-scalar-lrint.c",
- "src/qs8-igemm/gen/3x4-minmax-fp32-scalar-magic.c",
- "src/qs8-igemm/gen/3x4-minmax-rndnu-scalar.c",
- "src/qs8-igemm/gen/4x2-minmax-fp32-scalar-lrint.c",
- "src/qs8-igemm/gen/4x2-minmax-fp32-scalar-magic.c",
- "src/qs8-igemm/gen/4x2-minmax-rndnu-scalar.c",
- "src/qs8-igemm/gen/4x4-minmax-fp32-scalar-lrint.c",
- "src/qs8-igemm/gen/4x4-minmax-fp32-scalar-magic.c",
- "src/qs8-igemm/gen/4x4-minmax-rndnu-scalar.c",
+ "src/qs8-gemm/gen/1x2-minmax-fp32-scalar-lrintf.c",
+ "src/qs8-gemm/gen/1x2-minmax-fp32-scalar-fmagic.c",
+ "src/qs8-gemm/gen/1x4-minmax-fp32-scalar-lrintf.c",
+ "src/qs8-gemm/gen/1x4-minmax-fp32-scalar-fmagic.c",
+ "src/qs8-gemm/gen/2x2-minmax-fp32-scalar-lrintf.c",
+ "src/qs8-gemm/gen/2x2-minmax-fp32-scalar-fmagic.c",
+ "src/qs8-gemm/gen/2x4-minmax-fp32-scalar-lrintf.c",
+ "src/qs8-gemm/gen/2x4-minmax-fp32-scalar-fmagic.c",
+ "src/qs8-gemm/gen/3x2-minmax-fp32-scalar-lrintf.c",
+ "src/qs8-gemm/gen/3x2-minmax-fp32-scalar-fmagic.c",
+ "src/qs8-gemm/gen/3x4-minmax-fp32-scalar-lrintf.c",
+ "src/qs8-gemm/gen/3x4-minmax-fp32-scalar-fmagic.c",
+ "src/qs8-gemm/gen/4x2-minmax-fp32-scalar-lrintf.c",
+ "src/qs8-gemm/gen/4x2-minmax-fp32-scalar-fmagic.c",
+ "src/qs8-gemm/gen/4x4-minmax-fp32-scalar-lrintf.c",
+ "src/qs8-gemm/gen/4x4-minmax-fp32-scalar-fmagic.c",
+ "src/qs8-igemm/gen/1x2-minmax-fp32-scalar-lrintf.c",
+ "src/qs8-igemm/gen/1x2-minmax-fp32-scalar-fmagic.c",
+ "src/qs8-igemm/gen/1x4-minmax-fp32-scalar-lrintf.c",
+ "src/qs8-igemm/gen/1x4-minmax-fp32-scalar-fmagic.c",
+ "src/qs8-igemm/gen/2x2-minmax-fp32-scalar-lrintf.c",
+ "src/qs8-igemm/gen/2x2-minmax-fp32-scalar-fmagic.c",
+ "src/qs8-igemm/gen/2x4-minmax-fp32-scalar-lrintf.c",
+ "src/qs8-igemm/gen/2x4-minmax-fp32-scalar-fmagic.c",
+ "src/qs8-igemm/gen/3x2-minmax-fp32-scalar-lrintf.c",
+ "src/qs8-igemm/gen/3x2-minmax-fp32-scalar-fmagic.c",
+ "src/qs8-igemm/gen/3x4-minmax-fp32-scalar-lrintf.c",
+ "src/qs8-igemm/gen/3x4-minmax-fp32-scalar-fmagic.c",
+ "src/qs8-igemm/gen/4x2-minmax-fp32-scalar-lrintf.c",
+ "src/qs8-igemm/gen/4x2-minmax-fp32-scalar-fmagic.c",
+ "src/qs8-igemm/gen/4x4-minmax-fp32-scalar-lrintf.c",
+ "src/qs8-igemm/gen/4x4-minmax-fp32-scalar-fmagic.c",
"src/qs8-requantization/fp32-scalar-lrintf.c",
- "src/qs8-requantization/fp32-scalar-magic.c",
+ "src/qs8-requantization/fp32-scalar-fmagic.c",
"src/qs8-requantization/gemmlowp-scalar.c",
"src/qs8-requantization/rndna-scalar-signed64.c",
"src/qs8-requantization/rndna-scalar-unsigned32.c",
@@ -961,58 +945,58 @@
"src/qs8-vmulc/gen/minmax-fp32-scalar-x4.c",
"src/qu8-avgpool/9p8x-minmax-scalar-c1.c",
"src/qu8-avgpool/9x-minmax-scalar-c1.c",
- "src/qu8-dwconv/gen/up1x9-minmax-fp32-scalar-lrint.c",
- "src/qu8-dwconv/gen/up1x9-minmax-fp32-scalar-magic.c",
- "src/qu8-dwconv/gen/up1x25-minmax-fp32-scalar-lrint.c",
- "src/qu8-dwconv/gen/up1x25-minmax-fp32-scalar-magic.c",
- "src/qu8-dwconv/gen/up2x9-minmax-fp32-scalar-lrint.c",
- "src/qu8-dwconv/gen/up2x9-minmax-fp32-scalar-magic.c",
- "src/qu8-dwconv/gen/up2x25-minmax-fp32-scalar-lrint.c",
- "src/qu8-dwconv/gen/up2x25-minmax-fp32-scalar-magic.c",
- "src/qu8-dwconv/gen/up4x9-minmax-fp32-scalar-lrint.c",
- "src/qu8-dwconv/gen/up4x9-minmax-fp32-scalar-magic.c",
- "src/qu8-dwconv/gen/up4x25-minmax-fp32-scalar-lrint.c",
- "src/qu8-dwconv/gen/up4x25-minmax-fp32-scalar-magic.c",
+ "src/qu8-dwconv/gen/up1x9-minmax-fp32-scalar-lrintf.c",
+ "src/qu8-dwconv/gen/up1x9-minmax-fp32-scalar-fmagic.c",
+ "src/qu8-dwconv/gen/up1x25-minmax-fp32-scalar-lrintf.c",
+ "src/qu8-dwconv/gen/up1x25-minmax-fp32-scalar-fmagic.c",
+ "src/qu8-dwconv/gen/up2x9-minmax-fp32-scalar-lrintf.c",
+ "src/qu8-dwconv/gen/up2x9-minmax-fp32-scalar-fmagic.c",
+ "src/qu8-dwconv/gen/up2x25-minmax-fp32-scalar-lrintf.c",
+ "src/qu8-dwconv/gen/up2x25-minmax-fp32-scalar-fmagic.c",
+ "src/qu8-dwconv/gen/up4x9-minmax-fp32-scalar-lrintf.c",
+ "src/qu8-dwconv/gen/up4x9-minmax-fp32-scalar-fmagic.c",
+ "src/qu8-dwconv/gen/up4x25-minmax-fp32-scalar-lrintf.c",
+ "src/qu8-dwconv/gen/up4x25-minmax-fp32-scalar-fmagic.c",
"src/qu8-f32-vcvt/gen/vcvt-scalar-x1.c",
"src/qu8-f32-vcvt/gen/vcvt-scalar-x2.c",
"src/qu8-f32-vcvt/gen/vcvt-scalar-x3.c",
"src/qu8-f32-vcvt/gen/vcvt-scalar-x4.c",
"src/qu8-gavgpool/7p7x-minmax-scalar-c1.c",
"src/qu8-gavgpool/7x-minmax-scalar-c1.c",
- "src/qu8-gemm/gen/1x2-minmax-fp32-scalar-lrint.c",
- "src/qu8-gemm/gen/1x2-minmax-fp32-scalar-magic.c",
- "src/qu8-gemm/gen/1x4-minmax-fp32-scalar-lrint.c",
- "src/qu8-gemm/gen/1x4-minmax-fp32-scalar-magic.c",
- "src/qu8-gemm/gen/2x2-minmax-fp32-scalar-lrint.c",
- "src/qu8-gemm/gen/2x2-minmax-fp32-scalar-magic.c",
- "src/qu8-gemm/gen/2x4-minmax-fp32-scalar-lrint.c",
- "src/qu8-gemm/gen/2x4-minmax-fp32-scalar-magic.c",
- "src/qu8-gemm/gen/3x2-minmax-fp32-scalar-lrint.c",
- "src/qu8-gemm/gen/3x2-minmax-fp32-scalar-magic.c",
- "src/qu8-gemm/gen/3x4-minmax-fp32-scalar-lrint.c",
- "src/qu8-gemm/gen/3x4-minmax-fp32-scalar-magic.c",
- "src/qu8-gemm/gen/4x2-minmax-fp32-scalar-lrint.c",
- "src/qu8-gemm/gen/4x2-minmax-fp32-scalar-magic.c",
- "src/qu8-gemm/gen/4x4-minmax-fp32-scalar-lrint.c",
- "src/qu8-gemm/gen/4x4-minmax-fp32-scalar-magic.c",
- "src/qu8-igemm/gen/1x2-minmax-fp32-scalar-lrint.c",
- "src/qu8-igemm/gen/1x2-minmax-fp32-scalar-magic.c",
- "src/qu8-igemm/gen/1x4-minmax-fp32-scalar-lrint.c",
- "src/qu8-igemm/gen/1x4-minmax-fp32-scalar-magic.c",
- "src/qu8-igemm/gen/2x2-minmax-fp32-scalar-lrint.c",
- "src/qu8-igemm/gen/2x2-minmax-fp32-scalar-magic.c",
- "src/qu8-igemm/gen/2x4-minmax-fp32-scalar-lrint.c",
- "src/qu8-igemm/gen/2x4-minmax-fp32-scalar-magic.c",
- "src/qu8-igemm/gen/3x2-minmax-fp32-scalar-lrint.c",
- "src/qu8-igemm/gen/3x2-minmax-fp32-scalar-magic.c",
- "src/qu8-igemm/gen/3x4-minmax-fp32-scalar-lrint.c",
- "src/qu8-igemm/gen/3x4-minmax-fp32-scalar-magic.c",
- "src/qu8-igemm/gen/4x2-minmax-fp32-scalar-lrint.c",
- "src/qu8-igemm/gen/4x2-minmax-fp32-scalar-magic.c",
- "src/qu8-igemm/gen/4x4-minmax-fp32-scalar-lrint.c",
- "src/qu8-igemm/gen/4x4-minmax-fp32-scalar-magic.c",
+ "src/qu8-gemm/gen/1x2-minmax-fp32-scalar-lrintf.c",
+ "src/qu8-gemm/gen/1x2-minmax-fp32-scalar-fmagic.c",
+ "src/qu8-gemm/gen/1x4-minmax-fp32-scalar-lrintf.c",
+ "src/qu8-gemm/gen/1x4-minmax-fp32-scalar-fmagic.c",
+ "src/qu8-gemm/gen/2x2-minmax-fp32-scalar-lrintf.c",
+ "src/qu8-gemm/gen/2x2-minmax-fp32-scalar-fmagic.c",
+ "src/qu8-gemm/gen/2x4-minmax-fp32-scalar-lrintf.c",
+ "src/qu8-gemm/gen/2x4-minmax-fp32-scalar-fmagic.c",
+ "src/qu8-gemm/gen/3x2-minmax-fp32-scalar-lrintf.c",
+ "src/qu8-gemm/gen/3x2-minmax-fp32-scalar-fmagic.c",
+ "src/qu8-gemm/gen/3x4-minmax-fp32-scalar-lrintf.c",
+ "src/qu8-gemm/gen/3x4-minmax-fp32-scalar-fmagic.c",
+ "src/qu8-gemm/gen/4x2-minmax-fp32-scalar-lrintf.c",
+ "src/qu8-gemm/gen/4x2-minmax-fp32-scalar-fmagic.c",
+ "src/qu8-gemm/gen/4x4-minmax-fp32-scalar-lrintf.c",
+ "src/qu8-gemm/gen/4x4-minmax-fp32-scalar-fmagic.c",
+ "src/qu8-igemm/gen/1x2-minmax-fp32-scalar-lrintf.c",
+ "src/qu8-igemm/gen/1x2-minmax-fp32-scalar-fmagic.c",
+ "src/qu8-igemm/gen/1x4-minmax-fp32-scalar-lrintf.c",
+ "src/qu8-igemm/gen/1x4-minmax-fp32-scalar-fmagic.c",
+ "src/qu8-igemm/gen/2x2-minmax-fp32-scalar-lrintf.c",
+ "src/qu8-igemm/gen/2x2-minmax-fp32-scalar-fmagic.c",
+ "src/qu8-igemm/gen/2x4-minmax-fp32-scalar-lrintf.c",
+ "src/qu8-igemm/gen/2x4-minmax-fp32-scalar-fmagic.c",
+ "src/qu8-igemm/gen/3x2-minmax-fp32-scalar-lrintf.c",
+ "src/qu8-igemm/gen/3x2-minmax-fp32-scalar-fmagic.c",
+ "src/qu8-igemm/gen/3x4-minmax-fp32-scalar-lrintf.c",
+ "src/qu8-igemm/gen/3x4-minmax-fp32-scalar-fmagic.c",
+ "src/qu8-igemm/gen/4x2-minmax-fp32-scalar-lrintf.c",
+ "src/qu8-igemm/gen/4x2-minmax-fp32-scalar-fmagic.c",
+ "src/qu8-igemm/gen/4x4-minmax-fp32-scalar-lrintf.c",
+ "src/qu8-igemm/gen/4x4-minmax-fp32-scalar-fmagic.c",
"src/qu8-requantization/fp32-scalar-lrintf.c",
- "src/qu8-requantization/fp32-scalar-magic.c",
+ "src/qu8-requantization/fp32-scalar-fmagic.c",
"src/qu8-requantization/gemmlowp-scalar.c",
"src/qu8-requantization/rndna-scalar-signed64.c",
"src/qu8-requantization/rndna-scalar-unsigned32.c",
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 0d8f61d..cd0f12e 100755
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -347,36 +347,36 @@
src/f32-vunary/gen/vabs-scalar-x4.c
src/f32-vunary/gen/vneg-scalar-x4.c
src/f32-vunary/gen/vsqr-scalar-x4.c
- src/qc8-dwconv/gen/up2x9-minmax-fp32-scalar-magic.c
- src/qc8-dwconv/gen/up2x25-minmax-fp32-scalar-magic.c
- src/qc8-gemm/gen/1x2-minmax-fp32-scalar-magic.c
- src/qc8-gemm/gen/2x2-minmax-fp32-scalar-magic.c
- src/qc8-igemm/gen/1x2-minmax-fp32-scalar-magic.c
- src/qc8-igemm/gen/2x2-minmax-fp32-scalar-magic.c
- src/qs8-dwconv/gen/up1x9-minmax-fp32-scalar-magic.c
- src/qs8-dwconv/gen/up1x25-minmax-fp32-scalar-magic.c
+ src/qc8-dwconv/gen/up2x9-minmax-fp32-scalar-fmagic.c
+ src/qc8-dwconv/gen/up2x25-minmax-fp32-scalar-fmagic.c
+ src/qc8-gemm/gen/1x2-minmax-fp32-scalar-fmagic.c
+ src/qc8-gemm/gen/2x2-minmax-fp32-scalar-fmagic.c
+ src/qc8-igemm/gen/1x2-minmax-fp32-scalar-fmagic.c
+ src/qc8-igemm/gen/2x2-minmax-fp32-scalar-fmagic.c
+ src/qs8-dwconv/gen/up1x9-minmax-fp32-scalar-fmagic.c
+ src/qs8-dwconv/gen/up1x25-minmax-fp32-scalar-fmagic.c
src/qs8-f32-vcvt/gen/vcvt-scalar-x4.c
src/qs8-gavgpool/gen/7p7x-minmax-scalar-c1.c
src/qs8-gavgpool/gen/7x-minmax-scalar-c1.c
- src/qs8-gemm/gen/1x2-minmax-fp32-scalar-magic.c
- src/qs8-gemm/gen/2x2-minmax-fp32-scalar-magic.c
- src/qs8-igemm/gen/1x2-minmax-fp32-scalar-magic.c
- src/qs8-igemm/gen/2x2-minmax-fp32-scalar-magic.c
+ src/qs8-gemm/gen/1x2-minmax-fp32-scalar-fmagic.c
+ src/qs8-gemm/gen/2x2-minmax-fp32-scalar-fmagic.c
+ src/qs8-igemm/gen/1x2-minmax-fp32-scalar-fmagic.c
+ src/qs8-igemm/gen/2x2-minmax-fp32-scalar-fmagic.c
src/qs8-vadd/gen/minmax-scalar-x1.c
src/qs8-vaddc/gen/minmax-scalar-x1.c
src/qs8-vmul/gen/minmax-fp32-scalar-x4.c
src/qs8-vmulc/gen/minmax-fp32-scalar-x4.c
src/qu8-avgpool/9p8x-minmax-scalar-c1.c
src/qu8-avgpool/9x-minmax-scalar-c1.c
- src/qu8-dwconv/gen/up1x9-minmax-fp32-scalar-magic.c
- src/qu8-dwconv/gen/up1x25-minmax-fp32-scalar-magic.c
+ src/qu8-dwconv/gen/up1x9-minmax-fp32-scalar-fmagic.c
+ src/qu8-dwconv/gen/up1x25-minmax-fp32-scalar-fmagic.c
src/qu8-f32-vcvt/gen/vcvt-scalar-x4.c
src/qu8-gavgpool/7p7x-minmax-scalar-c1.c
src/qu8-gavgpool/7x-minmax-scalar-c1.c
- src/qu8-gemm/gen/1x2-minmax-fp32-scalar-magic.c
- src/qu8-gemm/gen/2x2-minmax-fp32-scalar-magic.c
- src/qu8-igemm/gen/1x2-minmax-fp32-scalar-magic.c
- src/qu8-igemm/gen/2x2-minmax-fp32-scalar-magic.c
+ src/qu8-gemm/gen/1x2-minmax-fp32-scalar-fmagic.c
+ src/qu8-gemm/gen/2x2-minmax-fp32-scalar-fmagic.c
+ src/qu8-igemm/gen/1x2-minmax-fp32-scalar-fmagic.c
+ src/qu8-igemm/gen/2x2-minmax-fp32-scalar-fmagic.c
src/qu8-vadd/gen/minmax-scalar-x1.c
src/qu8-vaddc/gen/minmax-scalar-x1.c
src/qu8-vmul/gen/minmax-fp32-scalar-x4.c
@@ -811,62 +811,62 @@
src/math/sigmoid-scalar-rr2-lut2048-p1-div.c
src/math/sigmoid-scalar-rr2-p5-div.c
src/params-init.c
- src/qc8-dwconv/gen/up1x9-minmax-fp32-scalar-lrint.c
- src/qc8-dwconv/gen/up1x9-minmax-fp32-scalar-magic.c
- src/qc8-dwconv/gen/up1x25-minmax-fp32-scalar-lrint.c
- src/qc8-dwconv/gen/up1x25-minmax-fp32-scalar-magic.c
- src/qc8-dwconv/gen/up2x9-minmax-fp32-scalar-lrint.c
- src/qc8-dwconv/gen/up2x9-minmax-fp32-scalar-magic.c
- src/qc8-dwconv/gen/up2x25-minmax-fp32-scalar-lrint.c
- src/qc8-dwconv/gen/up2x25-minmax-fp32-scalar-magic.c
- src/qc8-dwconv/gen/up4x9-minmax-fp32-scalar-lrint.c
- src/qc8-dwconv/gen/up4x9-minmax-fp32-scalar-magic.c
- src/qc8-dwconv/gen/up4x25-minmax-fp32-scalar-lrint.c
- src/qc8-dwconv/gen/up4x25-minmax-fp32-scalar-magic.c
- src/qc8-gemm/gen/1x2-minmax-fp32-scalar-lrint.c
- src/qc8-gemm/gen/1x2-minmax-fp32-scalar-magic.c
- src/qc8-gemm/gen/1x4-minmax-fp32-scalar-lrint.c
- src/qc8-gemm/gen/1x4-minmax-fp32-scalar-magic.c
- src/qc8-gemm/gen/2x2-minmax-fp32-scalar-lrint.c
- src/qc8-gemm/gen/2x2-minmax-fp32-scalar-magic.c
- src/qc8-gemm/gen/2x4-minmax-fp32-scalar-lrint.c
- src/qc8-gemm/gen/2x4-minmax-fp32-scalar-magic.c
- src/qc8-gemm/gen/3x2-minmax-fp32-scalar-lrint.c
- src/qc8-gemm/gen/3x2-minmax-fp32-scalar-magic.c
- src/qc8-gemm/gen/3x4-minmax-fp32-scalar-lrint.c
- src/qc8-gemm/gen/3x4-minmax-fp32-scalar-magic.c
- src/qc8-gemm/gen/4x2-minmax-fp32-scalar-lrint.c
- src/qc8-gemm/gen/4x2-minmax-fp32-scalar-magic.c
- src/qc8-gemm/gen/4x4-minmax-fp32-scalar-lrint.c
- src/qc8-gemm/gen/4x4-minmax-fp32-scalar-magic.c
- src/qc8-igemm/gen/1x2-minmax-fp32-scalar-lrint.c
- src/qc8-igemm/gen/1x2-minmax-fp32-scalar-magic.c
- src/qc8-igemm/gen/1x4-minmax-fp32-scalar-lrint.c
- src/qc8-igemm/gen/1x4-minmax-fp32-scalar-magic.c
- src/qc8-igemm/gen/2x2-minmax-fp32-scalar-lrint.c
- src/qc8-igemm/gen/2x2-minmax-fp32-scalar-magic.c
- src/qc8-igemm/gen/2x4-minmax-fp32-scalar-lrint.c
- src/qc8-igemm/gen/2x4-minmax-fp32-scalar-magic.c
- src/qc8-igemm/gen/3x2-minmax-fp32-scalar-lrint.c
- src/qc8-igemm/gen/3x2-minmax-fp32-scalar-magic.c
- src/qc8-igemm/gen/3x4-minmax-fp32-scalar-lrint.c
- src/qc8-igemm/gen/3x4-minmax-fp32-scalar-magic.c
- src/qc8-igemm/gen/4x2-minmax-fp32-scalar-lrint.c
- src/qc8-igemm/gen/4x2-minmax-fp32-scalar-magic.c
- src/qc8-igemm/gen/4x4-minmax-fp32-scalar-lrint.c
- src/qc8-igemm/gen/4x4-minmax-fp32-scalar-magic.c
- src/qs8-dwconv/gen/up1x9-minmax-fp32-scalar-lrint.c
- src/qs8-dwconv/gen/up1x9-minmax-fp32-scalar-magic.c
- src/qs8-dwconv/gen/up1x25-minmax-fp32-scalar-lrint.c
- src/qs8-dwconv/gen/up1x25-minmax-fp32-scalar-magic.c
- src/qs8-dwconv/gen/up2x9-minmax-fp32-scalar-lrint.c
- src/qs8-dwconv/gen/up2x9-minmax-fp32-scalar-magic.c
- src/qs8-dwconv/gen/up2x25-minmax-fp32-scalar-lrint.c
- src/qs8-dwconv/gen/up2x25-minmax-fp32-scalar-magic.c
- src/qs8-dwconv/gen/up4x9-minmax-fp32-scalar-lrint.c
- src/qs8-dwconv/gen/up4x9-minmax-fp32-scalar-magic.c
- src/qs8-dwconv/gen/up4x25-minmax-fp32-scalar-lrint.c
- src/qs8-dwconv/gen/up4x25-minmax-fp32-scalar-magic.c
+ src/qc8-dwconv/gen/up1x9-minmax-fp32-scalar-lrintf.c
+ src/qc8-dwconv/gen/up1x9-minmax-fp32-scalar-fmagic.c
+ src/qc8-dwconv/gen/up1x25-minmax-fp32-scalar-lrintf.c
+ src/qc8-dwconv/gen/up1x25-minmax-fp32-scalar-fmagic.c
+ src/qc8-dwconv/gen/up2x9-minmax-fp32-scalar-lrintf.c
+ src/qc8-dwconv/gen/up2x9-minmax-fp32-scalar-fmagic.c
+ src/qc8-dwconv/gen/up2x25-minmax-fp32-scalar-lrintf.c
+ src/qc8-dwconv/gen/up2x25-minmax-fp32-scalar-fmagic.c
+ src/qc8-dwconv/gen/up4x9-minmax-fp32-scalar-lrintf.c
+ src/qc8-dwconv/gen/up4x9-minmax-fp32-scalar-fmagic.c
+ src/qc8-dwconv/gen/up4x25-minmax-fp32-scalar-lrintf.c
+ src/qc8-dwconv/gen/up4x25-minmax-fp32-scalar-fmagic.c
+ src/qc8-gemm/gen/1x2-minmax-fp32-scalar-lrintf.c
+ src/qc8-gemm/gen/1x2-minmax-fp32-scalar-fmagic.c
+ src/qc8-gemm/gen/1x4-minmax-fp32-scalar-lrintf.c
+ src/qc8-gemm/gen/1x4-minmax-fp32-scalar-fmagic.c
+ src/qc8-gemm/gen/2x2-minmax-fp32-scalar-lrintf.c
+ src/qc8-gemm/gen/2x2-minmax-fp32-scalar-fmagic.c
+ src/qc8-gemm/gen/2x4-minmax-fp32-scalar-lrintf.c
+ src/qc8-gemm/gen/2x4-minmax-fp32-scalar-fmagic.c
+ src/qc8-gemm/gen/3x2-minmax-fp32-scalar-lrintf.c
+ src/qc8-gemm/gen/3x2-minmax-fp32-scalar-fmagic.c
+ src/qc8-gemm/gen/3x4-minmax-fp32-scalar-lrintf.c
+ src/qc8-gemm/gen/3x4-minmax-fp32-scalar-fmagic.c
+ src/qc8-gemm/gen/4x2-minmax-fp32-scalar-lrintf.c
+ src/qc8-gemm/gen/4x2-minmax-fp32-scalar-fmagic.c
+ src/qc8-gemm/gen/4x4-minmax-fp32-scalar-lrintf.c
+ src/qc8-gemm/gen/4x4-minmax-fp32-scalar-fmagic.c
+ src/qc8-igemm/gen/1x2-minmax-fp32-scalar-lrintf.c
+ src/qc8-igemm/gen/1x2-minmax-fp32-scalar-fmagic.c
+ src/qc8-igemm/gen/1x4-minmax-fp32-scalar-lrintf.c
+ src/qc8-igemm/gen/1x4-minmax-fp32-scalar-fmagic.c
+ src/qc8-igemm/gen/2x2-minmax-fp32-scalar-lrintf.c
+ src/qc8-igemm/gen/2x2-minmax-fp32-scalar-fmagic.c
+ src/qc8-igemm/gen/2x4-minmax-fp32-scalar-lrintf.c
+ src/qc8-igemm/gen/2x4-minmax-fp32-scalar-fmagic.c
+ src/qc8-igemm/gen/3x2-minmax-fp32-scalar-lrintf.c
+ src/qc8-igemm/gen/3x2-minmax-fp32-scalar-fmagic.c
+ src/qc8-igemm/gen/3x4-minmax-fp32-scalar-lrintf.c
+ src/qc8-igemm/gen/3x4-minmax-fp32-scalar-fmagic.c
+ src/qc8-igemm/gen/4x2-minmax-fp32-scalar-lrintf.c
+ src/qc8-igemm/gen/4x2-minmax-fp32-scalar-fmagic.c
+ src/qc8-igemm/gen/4x4-minmax-fp32-scalar-lrintf.c
+ src/qc8-igemm/gen/4x4-minmax-fp32-scalar-fmagic.c
+ src/qs8-dwconv/gen/up1x9-minmax-fp32-scalar-lrintf.c
+ src/qs8-dwconv/gen/up1x9-minmax-fp32-scalar-fmagic.c
+ src/qs8-dwconv/gen/up1x25-minmax-fp32-scalar-lrintf.c
+ src/qs8-dwconv/gen/up1x25-minmax-fp32-scalar-fmagic.c
+ src/qs8-dwconv/gen/up2x9-minmax-fp32-scalar-lrintf.c
+ src/qs8-dwconv/gen/up2x9-minmax-fp32-scalar-fmagic.c
+ src/qs8-dwconv/gen/up2x25-minmax-fp32-scalar-lrintf.c
+ src/qs8-dwconv/gen/up2x25-minmax-fp32-scalar-fmagic.c
+ src/qs8-dwconv/gen/up4x9-minmax-fp32-scalar-lrintf.c
+ src/qs8-dwconv/gen/up4x9-minmax-fp32-scalar-fmagic.c
+ src/qs8-dwconv/gen/up4x25-minmax-fp32-scalar-lrintf.c
+ src/qs8-dwconv/gen/up4x25-minmax-fp32-scalar-fmagic.c
src/qs8-f32-vcvt/gen/vcvt-scalar-x1.c
src/qs8-f32-vcvt/gen/vcvt-scalar-x2.c
src/qs8-f32-vcvt/gen/vcvt-scalar-x3.c
@@ -877,56 +877,40 @@
src/qs8-gavgpool/gen/7x-minmax-scalar-c1.c
src/qs8-gavgpool/gen/7x-minmax-scalar-c2.c
src/qs8-gavgpool/gen/7x-minmax-scalar-c4.c
- src/qs8-gemm/gen/1x2-minmax-fp32-scalar-lrint.c
- src/qs8-gemm/gen/1x2-minmax-fp32-scalar-magic.c
- src/qs8-gemm/gen/1x2-minmax-rndnu-scalar.c
- src/qs8-gemm/gen/1x4-minmax-fp32-scalar-lrint.c
- src/qs8-gemm/gen/1x4-minmax-fp32-scalar-magic.c
- src/qs8-gemm/gen/1x4-minmax-rndnu-scalar.c
- src/qs8-gemm/gen/2x2-minmax-fp32-scalar-lrint.c
- src/qs8-gemm/gen/2x2-minmax-fp32-scalar-magic.c
- src/qs8-gemm/gen/2x2-minmax-rndnu-scalar.c
- src/qs8-gemm/gen/2x4-minmax-fp32-scalar-lrint.c
- src/qs8-gemm/gen/2x4-minmax-fp32-scalar-magic.c
- src/qs8-gemm/gen/2x4-minmax-rndnu-scalar.c
- src/qs8-gemm/gen/3x2-minmax-fp32-scalar-lrint.c
- src/qs8-gemm/gen/3x2-minmax-fp32-scalar-magic.c
- src/qs8-gemm/gen/3x2-minmax-rndnu-scalar.c
- src/qs8-gemm/gen/3x4-minmax-fp32-scalar-lrint.c
- src/qs8-gemm/gen/3x4-minmax-fp32-scalar-magic.c
- src/qs8-gemm/gen/3x4-minmax-rndnu-scalar.c
- src/qs8-gemm/gen/4x2-minmax-fp32-scalar-lrint.c
- src/qs8-gemm/gen/4x2-minmax-fp32-scalar-magic.c
- src/qs8-gemm/gen/4x2-minmax-rndnu-scalar.c
- src/qs8-gemm/gen/4x4-minmax-fp32-scalar-lrint.c
- src/qs8-gemm/gen/4x4-minmax-fp32-scalar-magic.c
- src/qs8-gemm/gen/4x4-minmax-rndnu-scalar.c
- src/qs8-igemm/gen/1x2-minmax-fp32-scalar-lrint.c
- src/qs8-igemm/gen/1x2-minmax-fp32-scalar-magic.c
- src/qs8-igemm/gen/1x2-minmax-rndnu-scalar.c
- src/qs8-igemm/gen/1x4-minmax-fp32-scalar-lrint.c
- src/qs8-igemm/gen/1x4-minmax-fp32-scalar-magic.c
- src/qs8-igemm/gen/1x4-minmax-rndnu-scalar.c
- src/qs8-igemm/gen/2x2-minmax-fp32-scalar-lrint.c
- src/qs8-igemm/gen/2x2-minmax-fp32-scalar-magic.c
- src/qs8-igemm/gen/2x2-minmax-rndnu-scalar.c
- src/qs8-igemm/gen/2x4-minmax-fp32-scalar-lrint.c
- src/qs8-igemm/gen/2x4-minmax-fp32-scalar-magic.c
- src/qs8-igemm/gen/2x4-minmax-rndnu-scalar.c
- src/qs8-igemm/gen/3x2-minmax-fp32-scalar-lrint.c
- src/qs8-igemm/gen/3x2-minmax-fp32-scalar-magic.c
- src/qs8-igemm/gen/3x2-minmax-rndnu-scalar.c
- src/qs8-igemm/gen/3x4-minmax-fp32-scalar-lrint.c
- src/qs8-igemm/gen/3x4-minmax-fp32-scalar-magic.c
- src/qs8-igemm/gen/3x4-minmax-rndnu-scalar.c
- src/qs8-igemm/gen/4x2-minmax-fp32-scalar-lrint.c
- src/qs8-igemm/gen/4x2-minmax-fp32-scalar-magic.c
- src/qs8-igemm/gen/4x2-minmax-rndnu-scalar.c
- src/qs8-igemm/gen/4x4-minmax-fp32-scalar-lrint.c
- src/qs8-igemm/gen/4x4-minmax-fp32-scalar-magic.c
- src/qs8-igemm/gen/4x4-minmax-rndnu-scalar.c
+ src/qs8-gemm/gen/1x2-minmax-fp32-scalar-lrintf.c
+ src/qs8-gemm/gen/1x2-minmax-fp32-scalar-fmagic.c
+ src/qs8-gemm/gen/1x4-minmax-fp32-scalar-lrintf.c
+ src/qs8-gemm/gen/1x4-minmax-fp32-scalar-fmagic.c
+ src/qs8-gemm/gen/2x2-minmax-fp32-scalar-lrintf.c
+ src/qs8-gemm/gen/2x2-minmax-fp32-scalar-fmagic.c
+ src/qs8-gemm/gen/2x4-minmax-fp32-scalar-lrintf.c
+ src/qs8-gemm/gen/2x4-minmax-fp32-scalar-fmagic.c
+ src/qs8-gemm/gen/3x2-minmax-fp32-scalar-lrintf.c
+ src/qs8-gemm/gen/3x2-minmax-fp32-scalar-fmagic.c
+ src/qs8-gemm/gen/3x4-minmax-fp32-scalar-lrintf.c
+ src/qs8-gemm/gen/3x4-minmax-fp32-scalar-fmagic.c
+ src/qs8-gemm/gen/4x2-minmax-fp32-scalar-lrintf.c
+ src/qs8-gemm/gen/4x2-minmax-fp32-scalar-fmagic.c
+ src/qs8-gemm/gen/4x4-minmax-fp32-scalar-lrintf.c
+ src/qs8-gemm/gen/4x4-minmax-fp32-scalar-fmagic.c
+ src/qs8-igemm/gen/1x2-minmax-fp32-scalar-lrintf.c
+ src/qs8-igemm/gen/1x2-minmax-fp32-scalar-fmagic.c
+ src/qs8-igemm/gen/1x4-minmax-fp32-scalar-lrintf.c
+ src/qs8-igemm/gen/1x4-minmax-fp32-scalar-fmagic.c
+ src/qs8-igemm/gen/2x2-minmax-fp32-scalar-lrintf.c
+ src/qs8-igemm/gen/2x2-minmax-fp32-scalar-fmagic.c
+ src/qs8-igemm/gen/2x4-minmax-fp32-scalar-lrintf.c
+ src/qs8-igemm/gen/2x4-minmax-fp32-scalar-fmagic.c
+ src/qs8-igemm/gen/3x2-minmax-fp32-scalar-lrintf.c
+ src/qs8-igemm/gen/3x2-minmax-fp32-scalar-fmagic.c
+ src/qs8-igemm/gen/3x4-minmax-fp32-scalar-lrintf.c
+ src/qs8-igemm/gen/3x4-minmax-fp32-scalar-fmagic.c
+ src/qs8-igemm/gen/4x2-minmax-fp32-scalar-lrintf.c
+ src/qs8-igemm/gen/4x2-minmax-fp32-scalar-fmagic.c
+ src/qs8-igemm/gen/4x4-minmax-fp32-scalar-lrintf.c
+ src/qs8-igemm/gen/4x4-minmax-fp32-scalar-fmagic.c
src/qs8-requantization/fp32-scalar-lrintf.c
- src/qs8-requantization/fp32-scalar-magic.c
+ src/qs8-requantization/fp32-scalar-fmagic.c
src/qs8-requantization/gemmlowp-scalar.c
src/qs8-requantization/rndna-scalar-signed64.c
src/qs8-requantization/rndna-scalar-unsigned32.c
@@ -946,58 +930,58 @@
src/qs8-vmulc/gen/minmax-fp32-scalar-x4.c
src/qu8-avgpool/9p8x-minmax-scalar-c1.c
src/qu8-avgpool/9x-minmax-scalar-c1.c
- src/qu8-dwconv/gen/up1x9-minmax-fp32-scalar-lrint.c
- src/qu8-dwconv/gen/up1x9-minmax-fp32-scalar-magic.c
- src/qu8-dwconv/gen/up1x25-minmax-fp32-scalar-lrint.c
- src/qu8-dwconv/gen/up1x25-minmax-fp32-scalar-magic.c
- src/qu8-dwconv/gen/up2x9-minmax-fp32-scalar-lrint.c
- src/qu8-dwconv/gen/up2x9-minmax-fp32-scalar-magic.c
- src/qu8-dwconv/gen/up2x25-minmax-fp32-scalar-lrint.c
- src/qu8-dwconv/gen/up2x25-minmax-fp32-scalar-magic.c
- src/qu8-dwconv/gen/up4x9-minmax-fp32-scalar-lrint.c
- src/qu8-dwconv/gen/up4x9-minmax-fp32-scalar-magic.c
- src/qu8-dwconv/gen/up4x25-minmax-fp32-scalar-lrint.c
- src/qu8-dwconv/gen/up4x25-minmax-fp32-scalar-magic.c
+ src/qu8-dwconv/gen/up1x9-minmax-fp32-scalar-lrintf.c
+ src/qu8-dwconv/gen/up1x9-minmax-fp32-scalar-fmagic.c
+ src/qu8-dwconv/gen/up1x25-minmax-fp32-scalar-lrintf.c
+ src/qu8-dwconv/gen/up1x25-minmax-fp32-scalar-fmagic.c
+ src/qu8-dwconv/gen/up2x9-minmax-fp32-scalar-lrintf.c
+ src/qu8-dwconv/gen/up2x9-minmax-fp32-scalar-fmagic.c
+ src/qu8-dwconv/gen/up2x25-minmax-fp32-scalar-lrintf.c
+ src/qu8-dwconv/gen/up2x25-minmax-fp32-scalar-fmagic.c
+ src/qu8-dwconv/gen/up4x9-minmax-fp32-scalar-lrintf.c
+ src/qu8-dwconv/gen/up4x9-minmax-fp32-scalar-fmagic.c
+ src/qu8-dwconv/gen/up4x25-minmax-fp32-scalar-lrintf.c
+ src/qu8-dwconv/gen/up4x25-minmax-fp32-scalar-fmagic.c
src/qu8-f32-vcvt/gen/vcvt-scalar-x1.c
src/qu8-f32-vcvt/gen/vcvt-scalar-x2.c
src/qu8-f32-vcvt/gen/vcvt-scalar-x3.c
src/qu8-f32-vcvt/gen/vcvt-scalar-x4.c
src/qu8-gavgpool/7p7x-minmax-scalar-c1.c
src/qu8-gavgpool/7x-minmax-scalar-c1.c
- src/qu8-gemm/gen/1x2-minmax-fp32-scalar-lrint.c
- src/qu8-gemm/gen/1x2-minmax-fp32-scalar-magic.c
- src/qu8-gemm/gen/1x4-minmax-fp32-scalar-lrint.c
- src/qu8-gemm/gen/1x4-minmax-fp32-scalar-magic.c
- src/qu8-gemm/gen/2x2-minmax-fp32-scalar-lrint.c
- src/qu8-gemm/gen/2x2-minmax-fp32-scalar-magic.c
- src/qu8-gemm/gen/2x4-minmax-fp32-scalar-lrint.c
- src/qu8-gemm/gen/2x4-minmax-fp32-scalar-magic.c
- src/qu8-gemm/gen/3x2-minmax-fp32-scalar-lrint.c
- src/qu8-gemm/gen/3x2-minmax-fp32-scalar-magic.c
- src/qu8-gemm/gen/3x4-minmax-fp32-scalar-lrint.c
- src/qu8-gemm/gen/3x4-minmax-fp32-scalar-magic.c
- src/qu8-gemm/gen/4x2-minmax-fp32-scalar-lrint.c
- src/qu8-gemm/gen/4x2-minmax-fp32-scalar-magic.c
- src/qu8-gemm/gen/4x4-minmax-fp32-scalar-lrint.c
- src/qu8-gemm/gen/4x4-minmax-fp32-scalar-magic.c
- src/qu8-igemm/gen/1x2-minmax-fp32-scalar-lrint.c
- src/qu8-igemm/gen/1x2-minmax-fp32-scalar-magic.c
- src/qu8-igemm/gen/1x4-minmax-fp32-scalar-lrint.c
- src/qu8-igemm/gen/1x4-minmax-fp32-scalar-magic.c
- src/qu8-igemm/gen/2x2-minmax-fp32-scalar-lrint.c
- src/qu8-igemm/gen/2x2-minmax-fp32-scalar-magic.c
- src/qu8-igemm/gen/2x4-minmax-fp32-scalar-lrint.c
- src/qu8-igemm/gen/2x4-minmax-fp32-scalar-magic.c
- src/qu8-igemm/gen/3x2-minmax-fp32-scalar-lrint.c
- src/qu8-igemm/gen/3x2-minmax-fp32-scalar-magic.c
- src/qu8-igemm/gen/3x4-minmax-fp32-scalar-lrint.c
- src/qu8-igemm/gen/3x4-minmax-fp32-scalar-magic.c
- src/qu8-igemm/gen/4x2-minmax-fp32-scalar-lrint.c
- src/qu8-igemm/gen/4x2-minmax-fp32-scalar-magic.c
- src/qu8-igemm/gen/4x4-minmax-fp32-scalar-lrint.c
- src/qu8-igemm/gen/4x4-minmax-fp32-scalar-magic.c
+ src/qu8-gemm/gen/1x2-minmax-fp32-scalar-lrintf.c
+ src/qu8-gemm/gen/1x2-minmax-fp32-scalar-fmagic.c
+ src/qu8-gemm/gen/1x4-minmax-fp32-scalar-lrintf.c
+ src/qu8-gemm/gen/1x4-minmax-fp32-scalar-fmagic.c
+ src/qu8-gemm/gen/2x2-minmax-fp32-scalar-lrintf.c
+ src/qu8-gemm/gen/2x2-minmax-fp32-scalar-fmagic.c
+ src/qu8-gemm/gen/2x4-minmax-fp32-scalar-lrintf.c
+ src/qu8-gemm/gen/2x4-minmax-fp32-scalar-fmagic.c
+ src/qu8-gemm/gen/3x2-minmax-fp32-scalar-lrintf.c
+ src/qu8-gemm/gen/3x2-minmax-fp32-scalar-fmagic.c
+ src/qu8-gemm/gen/3x4-minmax-fp32-scalar-lrintf.c
+ src/qu8-gemm/gen/3x4-minmax-fp32-scalar-fmagic.c
+ src/qu8-gemm/gen/4x2-minmax-fp32-scalar-lrintf.c
+ src/qu8-gemm/gen/4x2-minmax-fp32-scalar-fmagic.c
+ src/qu8-gemm/gen/4x4-minmax-fp32-scalar-lrintf.c
+ src/qu8-gemm/gen/4x4-minmax-fp32-scalar-fmagic.c
+ src/qu8-igemm/gen/1x2-minmax-fp32-scalar-lrintf.c
+ src/qu8-igemm/gen/1x2-minmax-fp32-scalar-fmagic.c
+ src/qu8-igemm/gen/1x4-minmax-fp32-scalar-lrintf.c
+ src/qu8-igemm/gen/1x4-minmax-fp32-scalar-fmagic.c
+ src/qu8-igemm/gen/2x2-minmax-fp32-scalar-lrintf.c
+ src/qu8-igemm/gen/2x2-minmax-fp32-scalar-fmagic.c
+ src/qu8-igemm/gen/2x4-minmax-fp32-scalar-lrintf.c
+ src/qu8-igemm/gen/2x4-minmax-fp32-scalar-fmagic.c
+ src/qu8-igemm/gen/3x2-minmax-fp32-scalar-lrintf.c
+ src/qu8-igemm/gen/3x2-minmax-fp32-scalar-fmagic.c
+ src/qu8-igemm/gen/3x4-minmax-fp32-scalar-lrintf.c
+ src/qu8-igemm/gen/3x4-minmax-fp32-scalar-fmagic.c
+ src/qu8-igemm/gen/4x2-minmax-fp32-scalar-lrintf.c
+ src/qu8-igemm/gen/4x2-minmax-fp32-scalar-fmagic.c
+ src/qu8-igemm/gen/4x4-minmax-fp32-scalar-lrintf.c
+ src/qu8-igemm/gen/4x4-minmax-fp32-scalar-fmagic.c
src/qu8-requantization/fp32-scalar-lrintf.c
- src/qu8-requantization/fp32-scalar-magic.c
+ src/qu8-requantization/fp32-scalar-fmagic.c
src/qu8-requantization/gemmlowp-scalar.c
src/qu8-requantization/rndna-scalar-signed64.c
src/qu8-requantization/rndna-scalar-unsigned32.c
diff --git a/bench/qs8-dwconv-e2e.cc b/bench/qs8-dwconv-e2e.cc
index ea2cedb..89296fc 100644
--- a/bench/qs8-dwconv-e2e.cc
+++ b/bench/qs8-dwconv-e2e.cc
@@ -411,50 +411,50 @@
#endif // XNN_ARCH_WASMSIMD || XNN_ARCH_WASMRELAXEDSIMD
-static void qs8_dwconv_up1x9__scalar_lrint(benchmark::State& state, models::ExecutionPlanFactory model) {
+static void qs8_dwconv_up1x9__scalar_lrintf(benchmark::State& state, models::ExecutionPlanFactory model) {
DWConvEnd2EndBenchmark(state, model,
- xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrint,
- xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params,
+ xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrintf,
+ xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params,
1 /* channel tile */, 9 /* primary tile */);
}
-static void qs8_dwconv_up2x9__scalar_lrint(benchmark::State& state, models::ExecutionPlanFactory model) {
+static void qs8_dwconv_up2x9__scalar_lrintf(benchmark::State& state, models::ExecutionPlanFactory model) {
DWConvEnd2EndBenchmark(state, model,
- xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint,
- xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params,
+ xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrintf,
+ xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params,
2 /* channel tile */, 9 /* primary tile */);
}
-static void qs8_dwconv_up4x9__scalar_lrint(benchmark::State& state, models::ExecutionPlanFactory model) {
+static void qs8_dwconv_up4x9__scalar_lrintf(benchmark::State& state, models::ExecutionPlanFactory model) {
DWConvEnd2EndBenchmark(state, model,
- xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint,
- xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params,
+ xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrintf,
+ xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params,
4 /* channel tile */, 9 /* primary tile */);
}
-static void qs8_dwconv_up1x9__scalar_magic(benchmark::State& state, models::ExecutionPlanFactory model) {
+static void qs8_dwconv_up1x9__scalar_fmagic(benchmark::State& state, models::ExecutionPlanFactory model) {
DWConvEnd2EndBenchmark(state, model,
- xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_magic,
- xnn_init_qs8_conv_minmax_fp32_scalar_magic_params,
+ xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_fmagic,
+ xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params,
1 /* channel tile */, 9 /* primary tile */);
}
-static void qs8_dwconv_up2x9__scalar_magic(benchmark::State& state, models::ExecutionPlanFactory model) {
+static void qs8_dwconv_up2x9__scalar_fmagic(benchmark::State& state, models::ExecutionPlanFactory model) {
DWConvEnd2EndBenchmark(state, model,
- xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_magic,
- xnn_init_qs8_conv_minmax_fp32_scalar_magic_params,
+ xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic,
+ xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params,
2 /* channel tile */, 9 /* primary tile */);
}
-static void qs8_dwconv_up4x9__scalar_magic(benchmark::State& state, models::ExecutionPlanFactory model) {
+static void qs8_dwconv_up4x9__scalar_fmagic(benchmark::State& state, models::ExecutionPlanFactory model) {
DWConvEnd2EndBenchmark(state, model,
- xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_magic,
- xnn_init_qs8_conv_minmax_fp32_scalar_magic_params,
+ xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_fmagic,
+ xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params,
4 /* channel tile */, 9 /* primary tile */);
}
-BENCHMARK_QS8_END2END(qs8_dwconv_up1x9__scalar_lrint);
-BENCHMARK_QS8_END2END(qs8_dwconv_up2x9__scalar_lrint);
-BENCHMARK_QS8_END2END(qs8_dwconv_up4x9__scalar_lrint);
+BENCHMARK_QS8_END2END(qs8_dwconv_up1x9__scalar_lrintf);
+BENCHMARK_QS8_END2END(qs8_dwconv_up2x9__scalar_lrintf);
+BENCHMARK_QS8_END2END(qs8_dwconv_up4x9__scalar_lrintf);
-BENCHMARK_QS8_END2END(qs8_dwconv_up1x9__scalar_magic);
-BENCHMARK_QS8_END2END(qs8_dwconv_up2x9__scalar_magic);
-BENCHMARK_QS8_END2END(qs8_dwconv_up4x9__scalar_magic);
+BENCHMARK_QS8_END2END(qs8_dwconv_up1x9__scalar_fmagic);
+BENCHMARK_QS8_END2END(qs8_dwconv_up2x9__scalar_fmagic);
+BENCHMARK_QS8_END2END(qs8_dwconv_up4x9__scalar_fmagic);
#ifndef XNNPACK_BENCHMARK_NO_MAIN
diff --git a/bench/qs8-dwconv.cc b/bench/qs8-dwconv.cc
index 8547ae6..751dc3e 100644
--- a/bench/qs8-dwconv.cc
+++ b/bench/qs8-dwconv.cc
@@ -535,50 +535,50 @@
#endif // XNN_ARCH_WASMSIMD || XNN_ARCH_WASMRELAXEDSIMD
-static void qs8_dwconv_up1x9__scalar_lrint(benchmark::State& state, const char* net) {
+static void qs8_dwconv_up1x9__scalar_lrintf(benchmark::State& state, const char* net) {
DWConvBenchmark(state,
- xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrint,
- xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params,
+ xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrintf,
+ xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params,
1 /* channel tile */, 9 /* primary tile */);
}
-static void qs8_dwconv_up2x9__scalar_lrint(benchmark::State& state, const char* net) {
+static void qs8_dwconv_up2x9__scalar_lrintf(benchmark::State& state, const char* net) {
DWConvBenchmark(state,
- xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint,
- xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params,
+ xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrintf,
+ xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params,
2 /* channel tile */, 9 /* primary tile */);
}
-static void qs8_dwconv_up4x9__scalar_lrint(benchmark::State& state, const char* net) {
+static void qs8_dwconv_up4x9__scalar_lrintf(benchmark::State& state, const char* net) {
DWConvBenchmark(state,
- xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint,
- xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params,
+ xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrintf,
+ xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params,
4 /* channel tile */, 9 /* primary tile */);
}
-static void qs8_dwconv_up1x9__scalar_magic(benchmark::State& state, const char* net) {
+static void qs8_dwconv_up1x9__scalar_fmagic(benchmark::State& state, const char* net) {
DWConvBenchmark(state,
- xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_magic,
- xnn_init_qs8_conv_minmax_fp32_scalar_magic_params,
+ xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_fmagic,
+ xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params,
1 /* channel tile */, 9 /* primary tile */);
}
-static void qs8_dwconv_up2x9__scalar_magic(benchmark::State& state, const char* net) {
+static void qs8_dwconv_up2x9__scalar_fmagic(benchmark::State& state, const char* net) {
DWConvBenchmark(state,
- xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_magic,
- xnn_init_qs8_conv_minmax_fp32_scalar_magic_params,
+ xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic,
+ xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params,
2 /* channel tile */, 9 /* primary tile */);
}
-static void qs8_dwconv_up4x9__scalar_magic(benchmark::State& state, const char* net) {
+static void qs8_dwconv_up4x9__scalar_fmagic(benchmark::State& state, const char* net) {
DWConvBenchmark(state,
- xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_magic,
- xnn_init_qs8_conv_minmax_fp32_scalar_magic_params,
+ xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_fmagic,
+ xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params,
4 /* channel tile */, 9 /* primary tile */);
}
-BENCHMARK_DWCONV(qs8_dwconv_up1x9__scalar_lrint);
-BENCHMARK_DWCONV(qs8_dwconv_up2x9__scalar_lrint);
-BENCHMARK_DWCONV(qs8_dwconv_up4x9__scalar_lrint);
+BENCHMARK_DWCONV(qs8_dwconv_up1x9__scalar_lrintf);
+BENCHMARK_DWCONV(qs8_dwconv_up2x9__scalar_lrintf);
+BENCHMARK_DWCONV(qs8_dwconv_up4x9__scalar_lrintf);
-BENCHMARK_DWCONV(qs8_dwconv_up1x9__scalar_magic);
-BENCHMARK_DWCONV(qs8_dwconv_up2x9__scalar_magic);
-BENCHMARK_DWCONV(qs8_dwconv_up4x9__scalar_magic);
+BENCHMARK_DWCONV(qs8_dwconv_up1x9__scalar_fmagic);
+BENCHMARK_DWCONV(qs8_dwconv_up2x9__scalar_fmagic);
+BENCHMARK_DWCONV(qs8_dwconv_up4x9__scalar_fmagic);
#ifndef XNNPACK_BENCHMARK_NO_MAIN
diff --git a/bench/qs8-gemm-e2e.cc b/bench/qs8-gemm-e2e.cc
index 31efb09..8d2ceda 100644
--- a/bench/qs8-gemm-e2e.cc
+++ b/bench/qs8-gemm-e2e.cc
@@ -2699,72 +2699,72 @@
BENCHMARK_QS8_END2END(qs8_gemm_3x4c8__wasmsimd_mul16_ld128)
#endif // XNN_ARCH_WASMSIMD || XNN_ARCH_WASMRELAXEDSIMD
-static void qs8_gemm_2x2__scalar(benchmark::State& state, models::ExecutionPlanFactory model) {
+static void qs8_gemm_2x2__scalar_fmagic(benchmark::State& state, models::ExecutionPlanFactory model) {
GEMMEnd2EndBenchmark(state, model,
- xnn_qs8_gemm_minmax_rndnu_ukernel_2x2__scalar,
- xnn_qs8_igemm_minmax_rndnu_ukernel_2x2__scalar,
- xnn_qs8_gemm_minmax_rndnu_ukernel_1x2__scalar,
- xnn_qs8_igemm_minmax_rndnu_ukernel_1x2__scalar,
- xnn_init_qs8_conv_minmax_rndnu_scalar_params,
+ xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic,
+ xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic,
+ xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic,
+ xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic,
+ xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params,
2 /* mr */, 2 /* nr */);
}
-static void qs8_gemm_3x2__scalar(benchmark::State& state, models::ExecutionPlanFactory model) {
+static void qs8_gemm_3x2__scalar_fmagic(benchmark::State& state, models::ExecutionPlanFactory model) {
GEMMEnd2EndBenchmark(state, model,
- xnn_qs8_gemm_minmax_rndnu_ukernel_3x2__scalar,
- xnn_qs8_igemm_minmax_rndnu_ukernel_3x2__scalar,
- xnn_qs8_gemm_minmax_rndnu_ukernel_1x2__scalar,
- xnn_qs8_igemm_minmax_rndnu_ukernel_1x2__scalar,
- xnn_init_qs8_conv_minmax_rndnu_scalar_params,
+ xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic,
+ xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic,
+ xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic,
+ xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic,
+ xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params,
3 /* mr */, 2 /* nr */);
}
-static void qs8_gemm_4x2__scalar(benchmark::State& state, models::ExecutionPlanFactory model) {
+static void qs8_gemm_4x2__scalar_fmagic(benchmark::State& state, models::ExecutionPlanFactory model) {
GEMMEnd2EndBenchmark(state, model,
- xnn_qs8_gemm_minmax_rndnu_ukernel_4x2__scalar,
- xnn_qs8_igemm_minmax_rndnu_ukernel_4x2__scalar,
- xnn_qs8_gemm_minmax_rndnu_ukernel_1x2__scalar,
- xnn_qs8_igemm_minmax_rndnu_ukernel_1x2__scalar,
- xnn_init_qs8_conv_minmax_rndnu_scalar_params,
+ xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic,
+ xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic,
+ xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic,
+ xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic,
+ xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params,
4 /* mr */, 2 /* nr */);
}
-static void qs8_gemm_2x4__scalar(benchmark::State& state, models::ExecutionPlanFactory model) {
+static void qs8_gemm_2x4__scalar_fmagic(benchmark::State& state, models::ExecutionPlanFactory model) {
GEMMEnd2EndBenchmark(state, model,
- xnn_qs8_gemm_minmax_rndnu_ukernel_2x4__scalar,
- xnn_qs8_igemm_minmax_rndnu_ukernel_2x4__scalar,
- xnn_qs8_gemm_minmax_rndnu_ukernel_1x4__scalar,
- xnn_qs8_igemm_minmax_rndnu_ukernel_1x4__scalar,
- xnn_init_qs8_conv_minmax_rndnu_scalar_params,
+ xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic,
+ xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic,
+ xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic,
+ xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic,
+ xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params,
2 /* mr */, 4 /* nr */);
}
-static void qs8_gemm_3x4__scalar(benchmark::State& state, models::ExecutionPlanFactory model) {
+static void qs8_gemm_3x4__scalar_fmagic(benchmark::State& state, models::ExecutionPlanFactory model) {
GEMMEnd2EndBenchmark(state, model,
- xnn_qs8_gemm_minmax_rndnu_ukernel_3x4__scalar,
- xnn_qs8_igemm_minmax_rndnu_ukernel_3x4__scalar,
- xnn_qs8_gemm_minmax_rndnu_ukernel_1x4__scalar,
- xnn_qs8_igemm_minmax_rndnu_ukernel_1x4__scalar,
- xnn_init_qs8_conv_minmax_rndnu_scalar_params,
+ xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic,
+ xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic,
+ xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic,
+ xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic,
+ xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params,
3 /* mr */, 4 /* nr */);
}
-static void qs8_gemm_4x4__scalar(benchmark::State& state, models::ExecutionPlanFactory model) {
+static void qs8_gemm_4x4__scalar_fmagic(benchmark::State& state, models::ExecutionPlanFactory model) {
GEMMEnd2EndBenchmark(state, model,
- xnn_qs8_gemm_minmax_rndnu_ukernel_4x4__scalar,
- xnn_qs8_igemm_minmax_rndnu_ukernel_4x4__scalar,
- xnn_qs8_gemm_minmax_rndnu_ukernel_1x4__scalar,
- xnn_qs8_igemm_minmax_rndnu_ukernel_1x4__scalar,
- xnn_init_qs8_conv_minmax_rndnu_scalar_params,
+ xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic,
+ xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic,
+ xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic,
+ xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic,
+ xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params,
4 /* mr */, 4 /* nr */);
}
-BENCHMARK_QS8_END2END(qs8_gemm_2x2__scalar)
-BENCHMARK_QS8_END2END(qs8_gemm_3x2__scalar)
-BENCHMARK_QS8_END2END(qs8_gemm_4x2__scalar)
-BENCHMARK_QS8_END2END(qs8_gemm_2x4__scalar)
-BENCHMARK_QS8_END2END(qs8_gemm_3x4__scalar)
-BENCHMARK_QS8_END2END(qs8_gemm_4x4__scalar)
+BENCHMARK_QS8_END2END(qs8_gemm_2x2__scalar_fmagic)
+BENCHMARK_QS8_END2END(qs8_gemm_3x2__scalar_fmagic)
+BENCHMARK_QS8_END2END(qs8_gemm_4x2__scalar_fmagic)
+BENCHMARK_QS8_END2END(qs8_gemm_2x4__scalar_fmagic)
+BENCHMARK_QS8_END2END(qs8_gemm_3x4__scalar_fmagic)
+BENCHMARK_QS8_END2END(qs8_gemm_4x4__scalar_fmagic)
#ifndef XNNPACK_BENCHMARK_NO_MAIN
BENCHMARK_MAIN();
diff --git a/bench/qs8-gemm.cc b/bench/qs8-gemm.cc
index 1aa54fb..60d555d 100644
--- a/bench/qs8-gemm.cc
+++ b/bench/qs8-gemm.cc
@@ -1788,39 +1788,39 @@
#endif // XNN_ARCH_WASMSIMD || XNN_ARCH_WASMRELAXEDSIMD
-static void qs8_gemm_2x2__scalar(benchmark::State& state, const char* net) {
- GEMMBenchmark(state, xnn_qs8_gemm_minmax_rndnu_ukernel_2x2__scalar, 2, 2, 1, 1,
- xnn_init_qs8_conv_minmax_rndnu_scalar_params);
+static void qs8_gemm_2x2__scalar_fmagic(benchmark::State& state, const char* net) {
+ GEMMBenchmark(state, xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, 2, 2, 1, 1,
+ xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params);
}
-static void qs8_gemm_3x2__scalar(benchmark::State& state, const char* net) {
- GEMMBenchmark(state, xnn_qs8_gemm_minmax_rndnu_ukernel_3x2__scalar, 3, 2, 1, 1,
- xnn_init_qs8_conv_minmax_rndnu_scalar_params);
+static void qs8_gemm_3x2__scalar_fmagic(benchmark::State& state, const char* net) {
+ GEMMBenchmark(state, xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, 3, 2, 1, 1,
+ xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params);
}
-static void qs8_gemm_4x2__scalar(benchmark::State& state, const char* net) {
- GEMMBenchmark(state, xnn_qs8_gemm_minmax_rndnu_ukernel_4x2__scalar, 4, 2, 1, 1,
- xnn_init_qs8_conv_minmax_rndnu_scalar_params);
+static void qs8_gemm_4x2__scalar_fmagic(benchmark::State& state, const char* net) {
+ GEMMBenchmark(state, xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, 4, 2, 1, 1,
+ xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params);
}
-static void qs8_gemm_2x4__scalar(benchmark::State& state, const char* net) {
- GEMMBenchmark(state, xnn_qs8_gemm_minmax_rndnu_ukernel_2x4__scalar, 2, 4, 1, 1,
- xnn_init_qs8_conv_minmax_rndnu_scalar_params);
+static void qs8_gemm_2x4__scalar_fmagic(benchmark::State& state, const char* net) {
+ GEMMBenchmark(state, xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, 2, 4, 1, 1,
+ xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params);
}
-static void qs8_gemm_3x4__scalar(benchmark::State& state, const char* net) {
- GEMMBenchmark(state, xnn_qs8_gemm_minmax_rndnu_ukernel_3x4__scalar, 3, 4, 1, 1,
- xnn_init_qs8_conv_minmax_rndnu_scalar_params);
+static void qs8_gemm_3x4__scalar_fmagic(benchmark::State& state, const char* net) {
+ GEMMBenchmark(state, xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, 3, 4, 1, 1,
+ xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params);
}
-static void qs8_gemm_4x4__scalar(benchmark::State& state, const char* net) {
- GEMMBenchmark(state, xnn_qs8_gemm_minmax_rndnu_ukernel_4x4__scalar, 4, 4, 1, 1,
- xnn_init_qs8_conv_minmax_rndnu_scalar_params);
+static void qs8_gemm_4x4__scalar_fmagic(benchmark::State& state, const char* net) {
+ GEMMBenchmark(state, xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, 4, 4, 1, 1,
+ xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params);
}
-BENCHMARK_GEMM(qs8_gemm_2x2__scalar)
-BENCHMARK_GEMM(qs8_gemm_3x2__scalar)
-BENCHMARK_GEMM(qs8_gemm_4x2__scalar)
+BENCHMARK_GEMM(qs8_gemm_2x2__scalar_fmagic)
+BENCHMARK_GEMM(qs8_gemm_3x2__scalar_fmagic)
+BENCHMARK_GEMM(qs8_gemm_4x2__scalar_fmagic)
-BENCHMARK_GEMM(qs8_gemm_2x4__scalar)
-BENCHMARK_GEMM(qs8_gemm_3x4__scalar)
-BENCHMARK_GEMM(qs8_gemm_4x4__scalar)
+BENCHMARK_GEMM(qs8_gemm_2x4__scalar_fmagic)
+BENCHMARK_GEMM(qs8_gemm_3x4__scalar_fmagic)
+BENCHMARK_GEMM(qs8_gemm_4x4__scalar_fmagic)
#ifdef BENCHMARK_RUY
diff --git a/bench/qs8-requantization.cc b/bench/qs8-requantization.cc
index f20a361..8b8aa3d 100644
--- a/bench/qs8-requantization.cc
+++ b/bench/qs8-requantization.cc
@@ -214,9 +214,9 @@
}
}
-BENCHMARK_F(Requantization, fp32__scalar_magic)(benchmark::State& state) {
+BENCHMARK_F(Requantization, fp32__scalar_fmagic)(benchmark::State& state) {
for (auto _ : state) {
- xnn_qs8_requantize_fp32__scalar_magic(
+ xnn_qs8_requantize_fp32__scalar_fmagic(
n(), input(), 0x1.0p-12f /* scale */, -1 /* zero point */, -127 /* qmin */, 126 /* qmax */, output());
}
}
diff --git a/bench/qu8-dwconv-e2e.cc b/bench/qu8-dwconv-e2e.cc
index c9a6a30..8fe0651 100644
--- a/bench/qu8-dwconv-e2e.cc
+++ b/bench/qu8-dwconv-e2e.cc
@@ -268,50 +268,50 @@
#endif // XNN_ARCH_WASMSIMD || XNN_ARCH_WASMRELAXEDSIMD
-static void qu8_dwconv_up1x9__scalar_lrint(benchmark::State& state, models::ExecutionPlanFactory model) {
+static void qu8_dwconv_up1x9__scalar_lrintf(benchmark::State& state, models::ExecutionPlanFactory model) {
DWConvEnd2EndBenchmark(state, model,
- xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrint,
- xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params,
+ xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrintf,
+ xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params,
1 /* channel tile */, 9 /* primary tile */);
}
-static void qu8_dwconv_up2x9__scalar_lrint(benchmark::State& state, models::ExecutionPlanFactory model) {
+static void qu8_dwconv_up2x9__scalar_lrintf(benchmark::State& state, models::ExecutionPlanFactory model) {
DWConvEnd2EndBenchmark(state, model,
- xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint,
- xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params,
+ xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrintf,
+ xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params,
2 /* channel tile */, 9 /* primary tile */);
}
-static void qu8_dwconv_up4x9__scalar_lrint(benchmark::State& state, models::ExecutionPlanFactory model) {
+static void qu8_dwconv_up4x9__scalar_lrintf(benchmark::State& state, models::ExecutionPlanFactory model) {
DWConvEnd2EndBenchmark(state, model,
- xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint,
- xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params,
+ xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrintf,
+ xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params,
4 /* channel tile */, 9 /* primary tile */);
}
-static void qu8_dwconv_up1x9__scalar_magic(benchmark::State& state, models::ExecutionPlanFactory model) {
+static void qu8_dwconv_up1x9__scalar_fmagic(benchmark::State& state, models::ExecutionPlanFactory model) {
DWConvEnd2EndBenchmark(state, model,
- xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_magic,
- xnn_init_qu8_conv_minmax_fp32_scalar_magic_params,
+ xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_fmagic,
+ xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params,
1 /* channel tile */, 9 /* primary tile */);
}
-static void qu8_dwconv_up2x9__scalar_magic(benchmark::State& state, models::ExecutionPlanFactory model) {
+static void qu8_dwconv_up2x9__scalar_fmagic(benchmark::State& state, models::ExecutionPlanFactory model) {
DWConvEnd2EndBenchmark(state, model,
- xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_magic,
- xnn_init_qu8_conv_minmax_fp32_scalar_magic_params,
+ xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic,
+ xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params,
2 /* channel tile */, 9 /* primary tile */);
}
-static void qu8_dwconv_up4x9__scalar_magic(benchmark::State& state, models::ExecutionPlanFactory model) {
+static void qu8_dwconv_up4x9__scalar_fmagic(benchmark::State& state, models::ExecutionPlanFactory model) {
DWConvEnd2EndBenchmark(state, model,
- xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_magic,
- xnn_init_qu8_conv_minmax_fp32_scalar_magic_params,
+ xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_fmagic,
+ xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params,
4 /* channel tile */, 9 /* primary tile */);
}
-BENCHMARK_QU8_END2END(qu8_dwconv_up1x9__scalar_lrint);
-BENCHMARK_QU8_END2END(qu8_dwconv_up2x9__scalar_lrint);
-BENCHMARK_QU8_END2END(qu8_dwconv_up4x9__scalar_lrint);
+BENCHMARK_QU8_END2END(qu8_dwconv_up1x9__scalar_lrintf);
+BENCHMARK_QU8_END2END(qu8_dwconv_up2x9__scalar_lrintf);
+BENCHMARK_QU8_END2END(qu8_dwconv_up4x9__scalar_lrintf);
-BENCHMARK_QU8_END2END(qu8_dwconv_up1x9__scalar_magic);
-BENCHMARK_QU8_END2END(qu8_dwconv_up2x9__scalar_magic);
-BENCHMARK_QU8_END2END(qu8_dwconv_up4x9__scalar_magic);
+BENCHMARK_QU8_END2END(qu8_dwconv_up1x9__scalar_fmagic);
+BENCHMARK_QU8_END2END(qu8_dwconv_up2x9__scalar_fmagic);
+BENCHMARK_QU8_END2END(qu8_dwconv_up4x9__scalar_fmagic);
#ifndef XNNPACK_BENCHMARK_NO_MAIN
diff --git a/bench/qu8-gemm-e2e.cc b/bench/qu8-gemm-e2e.cc
index 9a4a3f8..da6c7b7 100644
--- a/bench/qu8-gemm-e2e.cc
+++ b/bench/qu8-gemm-e2e.cc
@@ -956,72 +956,72 @@
BENCHMARK_QU8_END2END(qu8_gemm_3x4c8__wasmsimd_mul32_ld128)
#endif // XNN_ARCH_WASMSIMD || XNN_ARCH_WASMRELAXEDSIMD
-static void qu8_gemm_2x2_scalar_magic(benchmark::State& state, models::ExecutionPlanFactory model) {
+static void qu8_gemm_2x2_scalar_fmagic(benchmark::State& state, models::ExecutionPlanFactory model) {
GEMMEnd2EndBenchmark(state, model,
- xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_magic,
- xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_magic,
- xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_magic,
- xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_magic,
- xnn_init_qu8_conv_minmax_fp32_scalar_magic_params,
+ xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic,
+ xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic,
+ xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic,
+ xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic,
+ xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params,
2 /* mr */, 2 /* nr */);
}
-static void qu8_gemm_3x2_scalar_magic(benchmark::State& state, models::ExecutionPlanFactory model) {
+static void qu8_gemm_3x2_scalar_fmagic(benchmark::State& state, models::ExecutionPlanFactory model) {
GEMMEnd2EndBenchmark(state, model,
- xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_magic,
- xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_magic,
- xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_magic,
- xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_magic,
- xnn_init_qu8_conv_minmax_fp32_scalar_magic_params,
+ xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic,
+ xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic,
+ xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic,
+ xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic,
+ xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params,
3 /* mr */, 2 /* nr */);
}
-static void qu8_gemm_4x2_scalar_magic(benchmark::State& state, models::ExecutionPlanFactory model) {
+static void qu8_gemm_4x2_scalar_fmagic(benchmark::State& state, models::ExecutionPlanFactory model) {
GEMMEnd2EndBenchmark(state, model,
- xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_magic,
- xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_magic,
- xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_magic,
- xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_magic,
- xnn_init_qu8_conv_minmax_fp32_scalar_magic_params,
+ xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic,
+ xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic,
+ xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic,
+ xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic,
+ xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params,
4 /* mr */, 2 /* nr */);
}
-static void qu8_gemm_2x4_scalar_magic(benchmark::State& state, models::ExecutionPlanFactory model) {
+static void qu8_gemm_2x4_scalar_fmagic(benchmark::State& state, models::ExecutionPlanFactory model) {
GEMMEnd2EndBenchmark(state, model,
- xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_magic,
- xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_magic,
- xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_magic,
- xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_magic,
- xnn_init_qu8_conv_minmax_fp32_scalar_magic_params,
+ xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic,
+ xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic,
+ xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic,
+ xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic,
+ xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params,
2 /* mr */, 4 /* nr */);
}
-static void qu8_gemm_3x4_scalar_magic(benchmark::State& state, models::ExecutionPlanFactory model) {
+static void qu8_gemm_3x4_scalar_fmagic(benchmark::State& state, models::ExecutionPlanFactory model) {
GEMMEnd2EndBenchmark(state, model,
- xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_magic,
- xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_magic,
- xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_magic,
- xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_magic,
- xnn_init_qu8_conv_minmax_fp32_scalar_magic_params,
+ xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic,
+ xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic,
+ xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic,
+ xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic,
+ xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params,
3 /* mr */, 4 /* nr */);
}
-static void qu8_gemm_4x4_scalar_magic(benchmark::State& state, models::ExecutionPlanFactory model) {
+static void qu8_gemm_4x4_scalar_fmagic(benchmark::State& state, models::ExecutionPlanFactory model) {
GEMMEnd2EndBenchmark(state, model,
- xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_magic,
- xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_magic,
- xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_magic,
- xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_magic,
- xnn_init_qu8_conv_minmax_fp32_scalar_magic_params,
+ xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic,
+ xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic,
+ xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic,
+ xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic,
+ xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params,
4 /* mr */, 4 /* nr */);
}
-BENCHMARK_QU8_END2END(qu8_gemm_2x2_scalar_magic)
-BENCHMARK_QU8_END2END(qu8_gemm_3x2_scalar_magic)
-BENCHMARK_QU8_END2END(qu8_gemm_4x2_scalar_magic)
-BENCHMARK_QU8_END2END(qu8_gemm_2x4_scalar_magic)
-BENCHMARK_QU8_END2END(qu8_gemm_3x4_scalar_magic)
-BENCHMARK_QU8_END2END(qu8_gemm_4x4_scalar_magic)
+BENCHMARK_QU8_END2END(qu8_gemm_2x2_scalar_fmagic)
+BENCHMARK_QU8_END2END(qu8_gemm_3x2_scalar_fmagic)
+BENCHMARK_QU8_END2END(qu8_gemm_4x2_scalar_fmagic)
+BENCHMARK_QU8_END2END(qu8_gemm_2x4_scalar_fmagic)
+BENCHMARK_QU8_END2END(qu8_gemm_3x4_scalar_fmagic)
+BENCHMARK_QU8_END2END(qu8_gemm_4x4_scalar_fmagic)
#ifndef XNNPACK_BENCHMARK_NO_MAIN
BENCHMARK_MAIN();
diff --git a/bench/qu8-gemm.cc b/bench/qu8-gemm.cc
index 9ed34d0..8ec990b 100644
--- a/bench/qu8-gemm.cc
+++ b/bench/qu8-gemm.cc
@@ -1087,120 +1087,120 @@
#endif // XNN_ARCH_WASMSIMD || XNN_ARCH_WASMRELAXEDSIMD
-static void qu8_gemm_1x2__scalar_lrint(benchmark::State& state, const char* net) {
+static void qu8_gemm_1x2__scalar_lrintf(benchmark::State& state, const char* net) {
GEMMBenchmark(state,
- xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint,
- xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params,
+ xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf,
+ xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params,
1, 2, 1, 1);
}
-static void qu8_gemm_2x2__scalar_lrint(benchmark::State& state, const char* net) {
+static void qu8_gemm_2x2__scalar_lrintf(benchmark::State& state, const char* net) {
GEMMBenchmark(state,
- xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint,
- xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params,
+ xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf,
+ xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params,
2, 2, 1, 1);
}
-static void qu8_gemm_3x2__scalar_lrint(benchmark::State& state, const char* net) {
+static void qu8_gemm_3x2__scalar_lrintf(benchmark::State& state, const char* net) {
GEMMBenchmark(state,
- xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint,
- xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params,
+ xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf,
+ xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params,
3, 2, 1, 1);
}
-static void qu8_gemm_4x2__scalar_lrint(benchmark::State& state, const char* net) {
+static void qu8_gemm_4x2__scalar_lrintf(benchmark::State& state, const char* net) {
GEMMBenchmark(state,
- xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint,
- xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params,
+ xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf,
+ xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params,
4, 2, 1, 1);
}
-static void qu8_gemm_1x4__scalar_lrint(benchmark::State& state, const char* net) {
+static void qu8_gemm_1x4__scalar_lrintf(benchmark::State& state, const char* net) {
GEMMBenchmark(state,
- xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint,
- xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params,
+ xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf,
+ xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params,
1, 4, 1, 1);
}
-static void qu8_gemm_2x4__scalar_lrint(benchmark::State& state, const char* net) {
+static void qu8_gemm_2x4__scalar_lrintf(benchmark::State& state, const char* net) {
GEMMBenchmark(state,
- xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint,
- xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params,
+ xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf,
+ xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params,
2, 4, 1, 1);
}
-static void qu8_gemm_3x4__scalar_lrint(benchmark::State& state, const char* net) {
+static void qu8_gemm_3x4__scalar_lrintf(benchmark::State& state, const char* net) {
GEMMBenchmark(state,
- xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint,
- xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params,
+ xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf,
+ xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params,
3, 4, 1, 1);
}
-static void qu8_gemm_4x4__scalar_lrint(benchmark::State& state, const char* net) {
+static void qu8_gemm_4x4__scalar_lrintf(benchmark::State& state, const char* net) {
GEMMBenchmark(state,
- xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint,
- xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params,
+ xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf,
+ xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params,
4, 4, 1, 1);
}
-static void qu8_gemm_1x2__scalar_magic(benchmark::State& state, const char* net) {
+static void qu8_gemm_1x2__scalar_fmagic(benchmark::State& state, const char* net) {
GEMMBenchmark(state,
- xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_magic,
- xnn_init_qu8_conv_minmax_fp32_scalar_magic_params,
+ xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic,
+ xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params,
1, 2, 1, 1);
}
-static void qu8_gemm_2x2__scalar_magic(benchmark::State& state, const char* net) {
+static void qu8_gemm_2x2__scalar_fmagic(benchmark::State& state, const char* net) {
GEMMBenchmark(state,
- xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_magic,
- xnn_init_qu8_conv_minmax_fp32_scalar_magic_params,
+ xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic,
+ xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params,
2, 2, 1, 1);
}
-static void qu8_gemm_3x2__scalar_magic(benchmark::State& state, const char* net) {
+static void qu8_gemm_3x2__scalar_fmagic(benchmark::State& state, const char* net) {
GEMMBenchmark(state,
- xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_magic,
- xnn_init_qu8_conv_minmax_fp32_scalar_magic_params,
+ xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic,
+ xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params,
3, 2, 1, 1);
}
-static void qu8_gemm_4x2__scalar_magic(benchmark::State& state, const char* net) {
+static void qu8_gemm_4x2__scalar_fmagic(benchmark::State& state, const char* net) {
GEMMBenchmark(state,
- xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_magic,
- xnn_init_qu8_conv_minmax_fp32_scalar_magic_params,
+ xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic,
+ xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params,
4, 2, 1, 1);
}
-static void qu8_gemm_1x4__scalar_magic(benchmark::State& state, const char* net) {
+static void qu8_gemm_1x4__scalar_fmagic(benchmark::State& state, const char* net) {
GEMMBenchmark(state,
- xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_magic,
- xnn_init_qu8_conv_minmax_fp32_scalar_magic_params,
+ xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic,
+ xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params,
1, 4, 1, 1);
}
-static void qu8_gemm_2x4__scalar_magic(benchmark::State& state, const char* net) {
+static void qu8_gemm_2x4__scalar_fmagic(benchmark::State& state, const char* net) {
GEMMBenchmark(state,
- xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_magic,
- xnn_init_qu8_conv_minmax_fp32_scalar_magic_params,
+ xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic,
+ xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params,
2, 4, 1, 1);
}
-static void qu8_gemm_3x4__scalar_magic(benchmark::State& state, const char* net) {
+static void qu8_gemm_3x4__scalar_fmagic(benchmark::State& state, const char* net) {
GEMMBenchmark(state,
- xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_magic,
- xnn_init_qu8_conv_minmax_fp32_scalar_magic_params,
+ xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic,
+ xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params,
3, 4, 1, 1);
}
-static void qu8_gemm_4x4__scalar_magic(benchmark::State& state, const char* net) {
+static void qu8_gemm_4x4__scalar_fmagic(benchmark::State& state, const char* net) {
GEMMBenchmark(state,
- xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_magic,
- xnn_init_qu8_conv_minmax_fp32_scalar_magic_params,
+ xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic,
+ xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params,
4, 4, 1, 1);
}
-BENCHMARK_GEMM(qu8_gemm_1x2__scalar_lrint)
-BENCHMARK_GEMM(qu8_gemm_2x2__scalar_lrint)
-BENCHMARK_GEMM(qu8_gemm_3x2__scalar_lrint)
-BENCHMARK_GEMM(qu8_gemm_4x2__scalar_lrint)
-BENCHMARK_GEMM(qu8_gemm_1x4__scalar_lrint)
-BENCHMARK_GEMM(qu8_gemm_2x4__scalar_lrint)
-BENCHMARK_GEMM(qu8_gemm_3x4__scalar_lrint)
-BENCHMARK_GEMM(qu8_gemm_4x4__scalar_lrint)
+BENCHMARK_GEMM(qu8_gemm_1x2__scalar_lrintf)
+BENCHMARK_GEMM(qu8_gemm_2x2__scalar_lrintf)
+BENCHMARK_GEMM(qu8_gemm_3x2__scalar_lrintf)
+BENCHMARK_GEMM(qu8_gemm_4x2__scalar_lrintf)
+BENCHMARK_GEMM(qu8_gemm_1x4__scalar_lrintf)
+BENCHMARK_GEMM(qu8_gemm_2x4__scalar_lrintf)
+BENCHMARK_GEMM(qu8_gemm_3x4__scalar_lrintf)
+BENCHMARK_GEMM(qu8_gemm_4x4__scalar_lrintf)
-BENCHMARK_GEMM(qu8_gemm_1x2__scalar_magic)
-BENCHMARK_GEMM(qu8_gemm_2x2__scalar_magic)
-BENCHMARK_GEMM(qu8_gemm_3x2__scalar_magic)
-BENCHMARK_GEMM(qu8_gemm_4x2__scalar_magic)
-BENCHMARK_GEMM(qu8_gemm_1x4__scalar_magic)
-BENCHMARK_GEMM(qu8_gemm_2x4__scalar_magic)
-BENCHMARK_GEMM(qu8_gemm_3x4__scalar_magic)
-BENCHMARK_GEMM(qu8_gemm_4x4__scalar_magic)
+BENCHMARK_GEMM(qu8_gemm_1x2__scalar_fmagic)
+BENCHMARK_GEMM(qu8_gemm_2x2__scalar_fmagic)
+BENCHMARK_GEMM(qu8_gemm_3x2__scalar_fmagic)
+BENCHMARK_GEMM(qu8_gemm_4x2__scalar_fmagic)
+BENCHMARK_GEMM(qu8_gemm_1x4__scalar_fmagic)
+BENCHMARK_GEMM(qu8_gemm_2x4__scalar_fmagic)
+BENCHMARK_GEMM(qu8_gemm_3x4__scalar_fmagic)
+BENCHMARK_GEMM(qu8_gemm_4x4__scalar_fmagic)
#ifdef BENCHMARK_RUY
diff --git a/bench/qu8-requantization.cc b/bench/qu8-requantization.cc
index 0d79ca9..f0fad8b 100644
--- a/bench/qu8-requantization.cc
+++ b/bench/qu8-requantization.cc
@@ -180,9 +180,9 @@
}
}
-BENCHMARK_F(Requantization, fp32__scalar_magic)(benchmark::State& state) {
+BENCHMARK_F(Requantization, fp32__scalar_fmagic)(benchmark::State& state) {
for (auto _ : state) {
- xnn_qu8_requantize_fp32__scalar_magic(
+ xnn_qu8_requantize_fp32__scalar_fmagic(
n(), input(), 0x1.0p-12f /* scale */, 128 /* zero point */, 1 /* qmin */, 254 /* qmax */, output());
}
}
diff --git a/scripts/generate-qs8-dwconv.sh b/scripts/generate-qs8-dwconv.sh
index c729122..75481e3 100755
--- a/scripts/generate-qs8-dwconv.sh
+++ b/scripts/generate-qs8-dwconv.sh
@@ -5,53 +5,53 @@
# LICENSE file in the root directory of this source tree.
################################### Scalar ###################################
-tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=1 -D KERNEL_TILE=9 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QC8 -o src/qc8-dwconv/gen/up1x9-minmax-fp32-scalar-lrint.c &
-tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=2 -D KERNEL_TILE=9 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QC8 -o src/qc8-dwconv/gen/up2x9-minmax-fp32-scalar-lrint.c &
-tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=4 -D KERNEL_TILE=9 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QC8 -o src/qc8-dwconv/gen/up4x9-minmax-fp32-scalar-lrint.c &
+tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=1 -D KERNEL_TILE=9 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QC8 -o src/qc8-dwconv/gen/up1x9-minmax-fp32-scalar-lrintf.c &
+tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=2 -D KERNEL_TILE=9 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QC8 -o src/qc8-dwconv/gen/up2x9-minmax-fp32-scalar-lrintf.c &
+tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=4 -D KERNEL_TILE=9 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QC8 -o src/qc8-dwconv/gen/up4x9-minmax-fp32-scalar-lrintf.c &
-tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=1 -D KERNEL_TILE=9 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QS8 -o src/qs8-dwconv/gen/up1x9-minmax-fp32-scalar-lrint.c &
-tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=2 -D KERNEL_TILE=9 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QS8 -o src/qs8-dwconv/gen/up2x9-minmax-fp32-scalar-lrint.c &
-tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=4 -D KERNEL_TILE=9 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QS8 -o src/qs8-dwconv/gen/up4x9-minmax-fp32-scalar-lrint.c &
+tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=1 -D KERNEL_TILE=9 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QS8 -o src/qs8-dwconv/gen/up1x9-minmax-fp32-scalar-lrintf.c &
+tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=2 -D KERNEL_TILE=9 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QS8 -o src/qs8-dwconv/gen/up2x9-minmax-fp32-scalar-lrintf.c &
+tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=4 -D KERNEL_TILE=9 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QS8 -o src/qs8-dwconv/gen/up4x9-minmax-fp32-scalar-lrintf.c &
-tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=1 -D KERNEL_TILE=9 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QU8 -o src/qu8-dwconv/gen/up1x9-minmax-fp32-scalar-lrint.c &
-tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=2 -D KERNEL_TILE=9 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QU8 -o src/qu8-dwconv/gen/up2x9-minmax-fp32-scalar-lrint.c &
-tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=4 -D KERNEL_TILE=9 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QU8 -o src/qu8-dwconv/gen/up4x9-minmax-fp32-scalar-lrint.c &
+tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=1 -D KERNEL_TILE=9 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QU8 -o src/qu8-dwconv/gen/up1x9-minmax-fp32-scalar-lrintf.c &
+tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=2 -D KERNEL_TILE=9 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QU8 -o src/qu8-dwconv/gen/up2x9-minmax-fp32-scalar-lrintf.c &
+tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=4 -D KERNEL_TILE=9 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QU8 -o src/qu8-dwconv/gen/up4x9-minmax-fp32-scalar-lrintf.c &
-tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=1 -D KERNEL_TILE=9 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QC8 -o src/qc8-dwconv/gen/up1x9-minmax-fp32-scalar-magic.c &
-tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=2 -D KERNEL_TILE=9 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QC8 -o src/qc8-dwconv/gen/up2x9-minmax-fp32-scalar-magic.c &
-tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=4 -D KERNEL_TILE=9 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QC8 -o src/qc8-dwconv/gen/up4x9-minmax-fp32-scalar-magic.c &
+tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=1 -D KERNEL_TILE=9 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QC8 -o src/qc8-dwconv/gen/up1x9-minmax-fp32-scalar-fmagic.c &
+tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=2 -D KERNEL_TILE=9 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QC8 -o src/qc8-dwconv/gen/up2x9-minmax-fp32-scalar-fmagic.c &
+tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=4 -D KERNEL_TILE=9 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QC8 -o src/qc8-dwconv/gen/up4x9-minmax-fp32-scalar-fmagic.c &
-tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=1 -D KERNEL_TILE=9 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QS8 -o src/qs8-dwconv/gen/up1x9-minmax-fp32-scalar-magic.c &
-tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=2 -D KERNEL_TILE=9 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QS8 -o src/qs8-dwconv/gen/up2x9-minmax-fp32-scalar-magic.c &
-tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=4 -D KERNEL_TILE=9 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QS8 -o src/qs8-dwconv/gen/up4x9-minmax-fp32-scalar-magic.c &
+tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=1 -D KERNEL_TILE=9 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QS8 -o src/qs8-dwconv/gen/up1x9-minmax-fp32-scalar-fmagic.c &
+tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=2 -D KERNEL_TILE=9 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QS8 -o src/qs8-dwconv/gen/up2x9-minmax-fp32-scalar-fmagic.c &
+tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=4 -D KERNEL_TILE=9 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QS8 -o src/qs8-dwconv/gen/up4x9-minmax-fp32-scalar-fmagic.c &
-tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=1 -D KERNEL_TILE=9 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QU8 -o src/qu8-dwconv/gen/up1x9-minmax-fp32-scalar-magic.c &
-tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=2 -D KERNEL_TILE=9 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QU8 -o src/qu8-dwconv/gen/up2x9-minmax-fp32-scalar-magic.c &
-tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=4 -D KERNEL_TILE=9 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QU8 -o src/qu8-dwconv/gen/up4x9-minmax-fp32-scalar-magic.c &
+tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=1 -D KERNEL_TILE=9 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QU8 -o src/qu8-dwconv/gen/up1x9-minmax-fp32-scalar-fmagic.c &
+tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=2 -D KERNEL_TILE=9 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QU8 -o src/qu8-dwconv/gen/up2x9-minmax-fp32-scalar-fmagic.c &
+tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=4 -D KERNEL_TILE=9 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QU8 -o src/qu8-dwconv/gen/up4x9-minmax-fp32-scalar-fmagic.c &
-tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=1 -D KERNEL_TILE=25 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QC8 -o src/qc8-dwconv/gen/up1x25-minmax-fp32-scalar-lrint.c &
-tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=2 -D KERNEL_TILE=25 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QC8 -o src/qc8-dwconv/gen/up2x25-minmax-fp32-scalar-lrint.c &
-tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=4 -D KERNEL_TILE=25 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QC8 -o src/qc8-dwconv/gen/up4x25-minmax-fp32-scalar-lrint.c &
+tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=1 -D KERNEL_TILE=25 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QC8 -o src/qc8-dwconv/gen/up1x25-minmax-fp32-scalar-lrintf.c &
+tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=2 -D KERNEL_TILE=25 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QC8 -o src/qc8-dwconv/gen/up2x25-minmax-fp32-scalar-lrintf.c &
+tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=4 -D KERNEL_TILE=25 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QC8 -o src/qc8-dwconv/gen/up4x25-minmax-fp32-scalar-lrintf.c &
-tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=1 -D KERNEL_TILE=25 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QS8 -o src/qs8-dwconv/gen/up1x25-minmax-fp32-scalar-lrint.c &
-tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=2 -D KERNEL_TILE=25 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QS8 -o src/qs8-dwconv/gen/up2x25-minmax-fp32-scalar-lrint.c &
-tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=4 -D KERNEL_TILE=25 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QS8 -o src/qs8-dwconv/gen/up4x25-minmax-fp32-scalar-lrint.c &
+tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=1 -D KERNEL_TILE=25 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QS8 -o src/qs8-dwconv/gen/up1x25-minmax-fp32-scalar-lrintf.c &
+tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=2 -D KERNEL_TILE=25 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QS8 -o src/qs8-dwconv/gen/up2x25-minmax-fp32-scalar-lrintf.c &
+tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=4 -D KERNEL_TILE=25 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QS8 -o src/qs8-dwconv/gen/up4x25-minmax-fp32-scalar-lrintf.c &
-tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=1 -D KERNEL_TILE=25 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QU8 -o src/qu8-dwconv/gen/up1x25-minmax-fp32-scalar-lrint.c &
-tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=2 -D KERNEL_TILE=25 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QU8 -o src/qu8-dwconv/gen/up2x25-minmax-fp32-scalar-lrint.c &
-tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=4 -D KERNEL_TILE=25 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QU8 -o src/qu8-dwconv/gen/up4x25-minmax-fp32-scalar-lrint.c &
+tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=1 -D KERNEL_TILE=25 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QU8 -o src/qu8-dwconv/gen/up1x25-minmax-fp32-scalar-lrintf.c &
+tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=2 -D KERNEL_TILE=25 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QU8 -o src/qu8-dwconv/gen/up2x25-minmax-fp32-scalar-lrintf.c &
+tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=4 -D KERNEL_TILE=25 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QU8 -o src/qu8-dwconv/gen/up4x25-minmax-fp32-scalar-lrintf.c &
-tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=1 -D KERNEL_TILE=25 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QC8 -o src/qc8-dwconv/gen/up1x25-minmax-fp32-scalar-magic.c &
-tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=2 -D KERNEL_TILE=25 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QC8 -o src/qc8-dwconv/gen/up2x25-minmax-fp32-scalar-magic.c &
-tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=4 -D KERNEL_TILE=25 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QC8 -o src/qc8-dwconv/gen/up4x25-minmax-fp32-scalar-magic.c &
+tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=1 -D KERNEL_TILE=25 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QC8 -o src/qc8-dwconv/gen/up1x25-minmax-fp32-scalar-fmagic.c &
+tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=2 -D KERNEL_TILE=25 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QC8 -o src/qc8-dwconv/gen/up2x25-minmax-fp32-scalar-fmagic.c &
+tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=4 -D KERNEL_TILE=25 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QC8 -o src/qc8-dwconv/gen/up4x25-minmax-fp32-scalar-fmagic.c &
-tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=1 -D KERNEL_TILE=25 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QS8 -o src/qs8-dwconv/gen/up1x25-minmax-fp32-scalar-magic.c &
-tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=2 -D KERNEL_TILE=25 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QS8 -o src/qs8-dwconv/gen/up2x25-minmax-fp32-scalar-magic.c &
-tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=4 -D KERNEL_TILE=25 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QS8 -o src/qs8-dwconv/gen/up4x25-minmax-fp32-scalar-magic.c &
+tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=1 -D KERNEL_TILE=25 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QS8 -o src/qs8-dwconv/gen/up1x25-minmax-fp32-scalar-fmagic.c &
+tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=2 -D KERNEL_TILE=25 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QS8 -o src/qs8-dwconv/gen/up2x25-minmax-fp32-scalar-fmagic.c &
+tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=4 -D KERNEL_TILE=25 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QS8 -o src/qs8-dwconv/gen/up4x25-minmax-fp32-scalar-fmagic.c &
-tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=1 -D KERNEL_TILE=25 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QU8 -o src/qu8-dwconv/gen/up1x25-minmax-fp32-scalar-magic.c &
-tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=2 -D KERNEL_TILE=25 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QU8 -o src/qu8-dwconv/gen/up2x25-minmax-fp32-scalar-magic.c &
-tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=4 -D KERNEL_TILE=25 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QU8 -o src/qu8-dwconv/gen/up4x25-minmax-fp32-scalar-magic.c &
+tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=1 -D KERNEL_TILE=25 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QU8 -o src/qu8-dwconv/gen/up1x25-minmax-fp32-scalar-fmagic.c &
+tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=2 -D KERNEL_TILE=25 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QU8 -o src/qu8-dwconv/gen/up2x25-minmax-fp32-scalar-fmagic.c &
+tools/xngen src/qs8-dwconv/unipass-scalar.c.in -D CHANNEL_TILE=4 -D KERNEL_TILE=25 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QU8 -o src/qu8-dwconv/gen/up4x25-minmax-fp32-scalar-fmagic.c &
################################## ARM NEON ##################################
tools/xngen src/qs8-dwconv/unipass-neon-mul8.c.in -D CHANNEL_TILE=8 -D KERNEL_TILE=9 -D REQUANTIZATION=FP32 -D DATATYPE=QC8 -D MLA=0 -D LOAD_VARIANT=LD64 -D ARMV8=0 -o src/qc8-dwconv/gen/up8x9-minmax-fp32-neon-mul8-ld64.c &
diff --git a/scripts/generate-qs8-gemm.sh b/scripts/generate-qs8-gemm.sh
index 62359c0..a8f254c 100755
--- a/scripts/generate-qs8-gemm.sh
+++ b/scripts/generate-qs8-gemm.sh
@@ -5,75 +5,65 @@
# LICENSE file in the root directory of this source tree.
#################################### Scalar ###################################
-tools/xngen src/qs8-gemm/scalar.c.in -D MR=1 -D NR=2 -D REQUANTIZATION=RNDNU -D VARIANT= -D DATATYPE=QS8 -D WASM=0 -o src/qs8-gemm/gen/1x2-minmax-rndnu-scalar.c &
-tools/xngen src/qs8-gemm/scalar.c.in -D MR=2 -D NR=2 -D REQUANTIZATION=RNDNU -D VARIANT= -D DATATYPE=QS8 -D WASM=0 -o src/qs8-gemm/gen/2x2-minmax-rndnu-scalar.c &
-tools/xngen src/qs8-gemm/scalar.c.in -D MR=3 -D NR=2 -D REQUANTIZATION=RNDNU -D VARIANT= -D DATATYPE=QS8 -D WASM=0 -o src/qs8-gemm/gen/3x2-minmax-rndnu-scalar.c &
-tools/xngen src/qs8-gemm/scalar.c.in -D MR=4 -D NR=2 -D REQUANTIZATION=RNDNU -D VARIANT= -D DATATYPE=QS8 -D WASM=0 -o src/qs8-gemm/gen/4x2-minmax-rndnu-scalar.c &
+tools/xngen src/qs8-gemm/scalar.c.in -D MR=1 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QC8 -D WASM=0 -o src/qc8-gemm/gen/1x2-minmax-fp32-scalar-fmagic.c &
+tools/xngen src/qs8-gemm/scalar.c.in -D MR=2 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QC8 -D WASM=0 -o src/qc8-gemm/gen/2x2-minmax-fp32-scalar-fmagic.c &
+tools/xngen src/qs8-gemm/scalar.c.in -D MR=3 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QC8 -D WASM=0 -o src/qc8-gemm/gen/3x2-minmax-fp32-scalar-fmagic.c &
+tools/xngen src/qs8-gemm/scalar.c.in -D MR=4 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QC8 -D WASM=0 -o src/qc8-gemm/gen/4x2-minmax-fp32-scalar-fmagic.c &
-tools/xngen src/qs8-gemm/scalar.c.in -D MR=1 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QC8 -D WASM=0 -o src/qc8-gemm/gen/1x2-minmax-fp32-scalar-magic.c &
-tools/xngen src/qs8-gemm/scalar.c.in -D MR=2 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QC8 -D WASM=0 -o src/qc8-gemm/gen/2x2-minmax-fp32-scalar-magic.c &
-tools/xngen src/qs8-gemm/scalar.c.in -D MR=3 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QC8 -D WASM=0 -o src/qc8-gemm/gen/3x2-minmax-fp32-scalar-magic.c &
-tools/xngen src/qs8-gemm/scalar.c.in -D MR=4 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QC8 -D WASM=0 -o src/qc8-gemm/gen/4x2-minmax-fp32-scalar-magic.c &
+tools/xngen src/qs8-gemm/scalar.c.in -D MR=1 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QS8 -D WASM=0 -o src/qs8-gemm/gen/1x2-minmax-fp32-scalar-fmagic.c &
+tools/xngen src/qs8-gemm/scalar.c.in -D MR=2 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QS8 -D WASM=0 -o src/qs8-gemm/gen/2x2-minmax-fp32-scalar-fmagic.c &
+tools/xngen src/qs8-gemm/scalar.c.in -D MR=3 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QS8 -D WASM=0 -o src/qs8-gemm/gen/3x2-minmax-fp32-scalar-fmagic.c &
+tools/xngen src/qs8-gemm/scalar.c.in -D MR=4 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QS8 -D WASM=0 -o src/qs8-gemm/gen/4x2-minmax-fp32-scalar-fmagic.c &
-tools/xngen src/qs8-gemm/scalar.c.in -D MR=1 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QS8 -D WASM=0 -o src/qs8-gemm/gen/1x2-minmax-fp32-scalar-magic.c &
-tools/xngen src/qs8-gemm/scalar.c.in -D MR=2 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QS8 -D WASM=0 -o src/qs8-gemm/gen/2x2-minmax-fp32-scalar-magic.c &
-tools/xngen src/qs8-gemm/scalar.c.in -D MR=3 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QS8 -D WASM=0 -o src/qs8-gemm/gen/3x2-minmax-fp32-scalar-magic.c &
-tools/xngen src/qs8-gemm/scalar.c.in -D MR=4 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QS8 -D WASM=0 -o src/qs8-gemm/gen/4x2-minmax-fp32-scalar-magic.c &
+tools/xngen src/qs8-gemm/scalar.c.in -D MR=1 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QU8 -D WASM=0 -o src/qu8-gemm/gen/1x2-minmax-fp32-scalar-fmagic.c &
+tools/xngen src/qs8-gemm/scalar.c.in -D MR=2 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QU8 -D WASM=0 -o src/qu8-gemm/gen/2x2-minmax-fp32-scalar-fmagic.c &
+tools/xngen src/qs8-gemm/scalar.c.in -D MR=3 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QU8 -D WASM=0 -o src/qu8-gemm/gen/3x2-minmax-fp32-scalar-fmagic.c &
+tools/xngen src/qs8-gemm/scalar.c.in -D MR=4 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QU8 -D WASM=0 -o src/qu8-gemm/gen/4x2-minmax-fp32-scalar-fmagic.c &
-tools/xngen src/qs8-gemm/scalar.c.in -D MR=1 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QU8 -D WASM=0 -o src/qu8-gemm/gen/1x2-minmax-fp32-scalar-magic.c &
-tools/xngen src/qs8-gemm/scalar.c.in -D MR=2 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QU8 -D WASM=0 -o src/qu8-gemm/gen/2x2-minmax-fp32-scalar-magic.c &
-tools/xngen src/qs8-gemm/scalar.c.in -D MR=3 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QU8 -D WASM=0 -o src/qu8-gemm/gen/3x2-minmax-fp32-scalar-magic.c &
-tools/xngen src/qs8-gemm/scalar.c.in -D MR=4 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QU8 -D WASM=0 -o src/qu8-gemm/gen/4x2-minmax-fp32-scalar-magic.c &
+tools/xngen src/qs8-gemm/scalar.c.in -D MR=1 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QC8 -D WASM=0 -o src/qc8-gemm/gen/1x2-minmax-fp32-scalar-lrintf.c &
+tools/xngen src/qs8-gemm/scalar.c.in -D MR=2 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QC8 -D WASM=0 -o src/qc8-gemm/gen/2x2-minmax-fp32-scalar-lrintf.c &
+tools/xngen src/qs8-gemm/scalar.c.in -D MR=3 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QC8 -D WASM=0 -o src/qc8-gemm/gen/3x2-minmax-fp32-scalar-lrintf.c &
+tools/xngen src/qs8-gemm/scalar.c.in -D MR=4 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QC8 -D WASM=0 -o src/qc8-gemm/gen/4x2-minmax-fp32-scalar-lrintf.c &
-tools/xngen src/qs8-gemm/scalar.c.in -D MR=1 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QC8 -D WASM=0 -o src/qc8-gemm/gen/1x2-minmax-fp32-scalar-lrint.c &
-tools/xngen src/qs8-gemm/scalar.c.in -D MR=2 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QC8 -D WASM=0 -o src/qc8-gemm/gen/2x2-minmax-fp32-scalar-lrint.c &
-tools/xngen src/qs8-gemm/scalar.c.in -D MR=3 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QC8 -D WASM=0 -o src/qc8-gemm/gen/3x2-minmax-fp32-scalar-lrint.c &
-tools/xngen src/qs8-gemm/scalar.c.in -D MR=4 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QC8 -D WASM=0 -o src/qc8-gemm/gen/4x2-minmax-fp32-scalar-lrint.c &
+tools/xngen src/qs8-gemm/scalar.c.in -D MR=1 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QS8 -D WASM=0 -o src/qs8-gemm/gen/1x2-minmax-fp32-scalar-lrintf.c &
+tools/xngen src/qs8-gemm/scalar.c.in -D MR=2 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QS8 -D WASM=0 -o src/qs8-gemm/gen/2x2-minmax-fp32-scalar-lrintf.c &
+tools/xngen src/qs8-gemm/scalar.c.in -D MR=3 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QS8 -D WASM=0 -o src/qs8-gemm/gen/3x2-minmax-fp32-scalar-lrintf.c &
+tools/xngen src/qs8-gemm/scalar.c.in -D MR=4 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QS8 -D WASM=0 -o src/qs8-gemm/gen/4x2-minmax-fp32-scalar-lrintf.c &
-tools/xngen src/qs8-gemm/scalar.c.in -D MR=1 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QS8 -D WASM=0 -o src/qs8-gemm/gen/1x2-minmax-fp32-scalar-lrint.c &
-tools/xngen src/qs8-gemm/scalar.c.in -D MR=2 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QS8 -D WASM=0 -o src/qs8-gemm/gen/2x2-minmax-fp32-scalar-lrint.c &
-tools/xngen src/qs8-gemm/scalar.c.in -D MR=3 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QS8 -D WASM=0 -o src/qs8-gemm/gen/3x2-minmax-fp32-scalar-lrint.c &
-tools/xngen src/qs8-gemm/scalar.c.in -D MR=4 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QS8 -D WASM=0 -o src/qs8-gemm/gen/4x2-minmax-fp32-scalar-lrint.c &
+tools/xngen src/qs8-gemm/scalar.c.in -D MR=1 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QU8 -D WASM=0 -o src/qu8-gemm/gen/1x2-minmax-fp32-scalar-lrintf.c &
+tools/xngen src/qs8-gemm/scalar.c.in -D MR=2 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QU8 -D WASM=0 -o src/qu8-gemm/gen/2x2-minmax-fp32-scalar-lrintf.c &
+tools/xngen src/qs8-gemm/scalar.c.in -D MR=3 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QU8 -D WASM=0 -o src/qu8-gemm/gen/3x2-minmax-fp32-scalar-lrintf.c &
+tools/xngen src/qs8-gemm/scalar.c.in -D MR=4 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QU8 -D WASM=0 -o src/qu8-gemm/gen/4x2-minmax-fp32-scalar-lrintf.c &
-tools/xngen src/qs8-gemm/scalar.c.in -D MR=1 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QU8 -D WASM=0 -o src/qu8-gemm/gen/1x2-minmax-fp32-scalar-lrint.c &
-tools/xngen src/qs8-gemm/scalar.c.in -D MR=2 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QU8 -D WASM=0 -o src/qu8-gemm/gen/2x2-minmax-fp32-scalar-lrint.c &
-tools/xngen src/qs8-gemm/scalar.c.in -D MR=3 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QU8 -D WASM=0 -o src/qu8-gemm/gen/3x2-minmax-fp32-scalar-lrint.c &
-tools/xngen src/qs8-gemm/scalar.c.in -D MR=4 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QU8 -D WASM=0 -o src/qu8-gemm/gen/4x2-minmax-fp32-scalar-lrint.c &
+tools/xngen src/qs8-gemm/scalar.c.in -D MR=1 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QC8 -D WASM=0 -o src/qc8-gemm/gen/1x4-minmax-fp32-scalar-fmagic.c &
+tools/xngen src/qs8-gemm/scalar.c.in -D MR=2 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QC8 -D WASM=0 -o src/qc8-gemm/gen/2x4-minmax-fp32-scalar-fmagic.c &
+tools/xngen src/qs8-gemm/scalar.c.in -D MR=3 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QC8 -D WASM=0 -o src/qc8-gemm/gen/3x4-minmax-fp32-scalar-fmagic.c &
+tools/xngen src/qs8-gemm/scalar.c.in -D MR=4 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QC8 -D WASM=0 -o src/qc8-gemm/gen/4x4-minmax-fp32-scalar-fmagic.c &
-tools/xngen src/qs8-gemm/scalar.c.in -D MR=1 -D NR=4 -D REQUANTIZATION=RNDNU -D VARIANT= -D DATATYPE=QS8 -D WASM=0 -o src/qs8-gemm/gen/1x4-minmax-rndnu-scalar.c &
-tools/xngen src/qs8-gemm/scalar.c.in -D MR=2 -D NR=4 -D REQUANTIZATION=RNDNU -D VARIANT= -D DATATYPE=QS8 -D WASM=0 -o src/qs8-gemm/gen/2x4-minmax-rndnu-scalar.c &
-tools/xngen src/qs8-gemm/scalar.c.in -D MR=3 -D NR=4 -D REQUANTIZATION=RNDNU -D VARIANT= -D DATATYPE=QS8 -D WASM=0 -o src/qs8-gemm/gen/3x4-minmax-rndnu-scalar.c &
-tools/xngen src/qs8-gemm/scalar.c.in -D MR=4 -D NR=4 -D REQUANTIZATION=RNDNU -D VARIANT= -D DATATYPE=QS8 -D WASM=0 -o src/qs8-gemm/gen/4x4-minmax-rndnu-scalar.c &
+tools/xngen src/qs8-gemm/scalar.c.in -D MR=1 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QS8 -D WASM=0 -o src/qs8-gemm/gen/1x4-minmax-fp32-scalar-fmagic.c &
+tools/xngen src/qs8-gemm/scalar.c.in -D MR=2 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QS8 -D WASM=0 -o src/qs8-gemm/gen/2x4-minmax-fp32-scalar-fmagic.c &
+tools/xngen src/qs8-gemm/scalar.c.in -D MR=3 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QS8 -D WASM=0 -o src/qs8-gemm/gen/3x4-minmax-fp32-scalar-fmagic.c &
+tools/xngen src/qs8-gemm/scalar.c.in -D MR=4 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QS8 -D WASM=0 -o src/qs8-gemm/gen/4x4-minmax-fp32-scalar-fmagic.c &
-tools/xngen src/qs8-gemm/scalar.c.in -D MR=1 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QC8 -D WASM=0 -o src/qc8-gemm/gen/1x4-minmax-fp32-scalar-magic.c &
-tools/xngen src/qs8-gemm/scalar.c.in -D MR=2 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QC8 -D WASM=0 -o src/qc8-gemm/gen/2x4-minmax-fp32-scalar-magic.c &
-tools/xngen src/qs8-gemm/scalar.c.in -D MR=3 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QC8 -D WASM=0 -o src/qc8-gemm/gen/3x4-minmax-fp32-scalar-magic.c &
-tools/xngen src/qs8-gemm/scalar.c.in -D MR=4 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QC8 -D WASM=0 -o src/qc8-gemm/gen/4x4-minmax-fp32-scalar-magic.c &
+tools/xngen src/qs8-gemm/scalar.c.in -D MR=1 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QU8 -D WASM=0 -o src/qu8-gemm/gen/1x4-minmax-fp32-scalar-fmagic.c &
+tools/xngen src/qs8-gemm/scalar.c.in -D MR=2 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QU8 -D WASM=0 -o src/qu8-gemm/gen/2x4-minmax-fp32-scalar-fmagic.c &
+tools/xngen src/qs8-gemm/scalar.c.in -D MR=3 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QU8 -D WASM=0 -o src/qu8-gemm/gen/3x4-minmax-fp32-scalar-fmagic.c &
+tools/xngen src/qs8-gemm/scalar.c.in -D MR=4 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QU8 -D WASM=0 -o src/qu8-gemm/gen/4x4-minmax-fp32-scalar-fmagic.c &
-tools/xngen src/qs8-gemm/scalar.c.in -D MR=1 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QS8 -D WASM=0 -o src/qs8-gemm/gen/1x4-minmax-fp32-scalar-magic.c &
-tools/xngen src/qs8-gemm/scalar.c.in -D MR=2 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QS8 -D WASM=0 -o src/qs8-gemm/gen/2x4-minmax-fp32-scalar-magic.c &
-tools/xngen src/qs8-gemm/scalar.c.in -D MR=3 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QS8 -D WASM=0 -o src/qs8-gemm/gen/3x4-minmax-fp32-scalar-magic.c &
-tools/xngen src/qs8-gemm/scalar.c.in -D MR=4 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QS8 -D WASM=0 -o src/qs8-gemm/gen/4x4-minmax-fp32-scalar-magic.c &
+tools/xngen src/qs8-gemm/scalar.c.in -D MR=1 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QC8 -D WASM=0 -o src/qc8-gemm/gen/1x4-minmax-fp32-scalar-lrintf.c &
+tools/xngen src/qs8-gemm/scalar.c.in -D MR=2 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QC8 -D WASM=0 -o src/qc8-gemm/gen/2x4-minmax-fp32-scalar-lrintf.c &
+tools/xngen src/qs8-gemm/scalar.c.in -D MR=3 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QC8 -D WASM=0 -o src/qc8-gemm/gen/3x4-minmax-fp32-scalar-lrintf.c &
+tools/xngen src/qs8-gemm/scalar.c.in -D MR=4 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QC8 -D WASM=0 -o src/qc8-gemm/gen/4x4-minmax-fp32-scalar-lrintf.c &
-tools/xngen src/qs8-gemm/scalar.c.in -D MR=1 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QU8 -D WASM=0 -o src/qu8-gemm/gen/1x4-minmax-fp32-scalar-magic.c &
-tools/xngen src/qs8-gemm/scalar.c.in -D MR=2 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QU8 -D WASM=0 -o src/qu8-gemm/gen/2x4-minmax-fp32-scalar-magic.c &
-tools/xngen src/qs8-gemm/scalar.c.in -D MR=3 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QU8 -D WASM=0 -o src/qu8-gemm/gen/3x4-minmax-fp32-scalar-magic.c &
-tools/xngen src/qs8-gemm/scalar.c.in -D MR=4 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QU8 -D WASM=0 -o src/qu8-gemm/gen/4x4-minmax-fp32-scalar-magic.c &
+tools/xngen src/qs8-gemm/scalar.c.in -D MR=1 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QS8 -D WASM=0 -o src/qs8-gemm/gen/1x4-minmax-fp32-scalar-lrintf.c &
+tools/xngen src/qs8-gemm/scalar.c.in -D MR=2 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QS8 -D WASM=0 -o src/qs8-gemm/gen/2x4-minmax-fp32-scalar-lrintf.c &
+tools/xngen src/qs8-gemm/scalar.c.in -D MR=3 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QS8 -D WASM=0 -o src/qs8-gemm/gen/3x4-minmax-fp32-scalar-lrintf.c &
+tools/xngen src/qs8-gemm/scalar.c.in -D MR=4 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QS8 -D WASM=0 -o src/qs8-gemm/gen/4x4-minmax-fp32-scalar-lrintf.c &
-tools/xngen src/qs8-gemm/scalar.c.in -D MR=1 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QC8 -D WASM=0 -o src/qc8-gemm/gen/1x4-minmax-fp32-scalar-lrint.c &
-tools/xngen src/qs8-gemm/scalar.c.in -D MR=2 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QC8 -D WASM=0 -o src/qc8-gemm/gen/2x4-minmax-fp32-scalar-lrint.c &
-tools/xngen src/qs8-gemm/scalar.c.in -D MR=3 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QC8 -D WASM=0 -o src/qc8-gemm/gen/3x4-minmax-fp32-scalar-lrint.c &
-tools/xngen src/qs8-gemm/scalar.c.in -D MR=4 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QC8 -D WASM=0 -o src/qc8-gemm/gen/4x4-minmax-fp32-scalar-lrint.c &
-
-tools/xngen src/qs8-gemm/scalar.c.in -D MR=1 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QS8 -D WASM=0 -o src/qs8-gemm/gen/1x4-minmax-fp32-scalar-lrint.c &
-tools/xngen src/qs8-gemm/scalar.c.in -D MR=2 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QS8 -D WASM=0 -o src/qs8-gemm/gen/2x4-minmax-fp32-scalar-lrint.c &
-tools/xngen src/qs8-gemm/scalar.c.in -D MR=3 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QS8 -D WASM=0 -o src/qs8-gemm/gen/3x4-minmax-fp32-scalar-lrint.c &
-tools/xngen src/qs8-gemm/scalar.c.in -D MR=4 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QS8 -D WASM=0 -o src/qs8-gemm/gen/4x4-minmax-fp32-scalar-lrint.c &
-
-tools/xngen src/qs8-gemm/scalar.c.in -D MR=1 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QU8 -D WASM=0 -o src/qu8-gemm/gen/1x4-minmax-fp32-scalar-lrint.c &
-tools/xngen src/qs8-gemm/scalar.c.in -D MR=2 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QU8 -D WASM=0 -o src/qu8-gemm/gen/2x4-minmax-fp32-scalar-lrint.c &
-tools/xngen src/qs8-gemm/scalar.c.in -D MR=3 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QU8 -D WASM=0 -o src/qu8-gemm/gen/3x4-minmax-fp32-scalar-lrint.c &
-tools/xngen src/qs8-gemm/scalar.c.in -D MR=4 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QU8 -D WASM=0 -o src/qu8-gemm/gen/4x4-minmax-fp32-scalar-lrint.c &
+tools/xngen src/qs8-gemm/scalar.c.in -D MR=1 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QU8 -D WASM=0 -o src/qu8-gemm/gen/1x4-minmax-fp32-scalar-lrintf.c &
+tools/xngen src/qs8-gemm/scalar.c.in -D MR=2 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QU8 -D WASM=0 -o src/qu8-gemm/gen/2x4-minmax-fp32-scalar-lrintf.c &
+tools/xngen src/qs8-gemm/scalar.c.in -D MR=3 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QU8 -D WASM=0 -o src/qu8-gemm/gen/3x4-minmax-fp32-scalar-lrintf.c &
+tools/xngen src/qs8-gemm/scalar.c.in -D MR=4 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QU8 -D WASM=0 -o src/qu8-gemm/gen/4x4-minmax-fp32-scalar-lrintf.c &
################################## WAsm SIMD ##################################
### C2 micro-kernels
diff --git a/scripts/generate-qs8-igemm.sh b/scripts/generate-qs8-igemm.sh
index ddd9485..142996e 100755
--- a/scripts/generate-qs8-igemm.sh
+++ b/scripts/generate-qs8-igemm.sh
@@ -5,75 +5,65 @@
# LICENSE file in the root directory of this source tree.
#################################### Scalar ###################################
-tools/xngen src/qs8-igemm/scalar.c.in -D MR=1 -D NR=2 -D REQUANTIZATION=RNDNU -D VARIANT= -D DATATYPE=QS8 -D WASM=0 -o src/qs8-igemm/gen/1x2-minmax-rndnu-scalar.c &
-tools/xngen src/qs8-igemm/scalar.c.in -D MR=2 -D NR=2 -D REQUANTIZATION=RNDNU -D VARIANT= -D DATATYPE=QS8 -D WASM=0 -o src/qs8-igemm/gen/2x2-minmax-rndnu-scalar.c &
-tools/xngen src/qs8-igemm/scalar.c.in -D MR=3 -D NR=2 -D REQUANTIZATION=RNDNU -D VARIANT= -D DATATYPE=QS8 -D WASM=0 -o src/qs8-igemm/gen/3x2-minmax-rndnu-scalar.c &
-tools/xngen src/qs8-igemm/scalar.c.in -D MR=4 -D NR=2 -D REQUANTIZATION=RNDNU -D VARIANT= -D DATATYPE=QS8 -D WASM=0 -o src/qs8-igemm/gen/4x2-minmax-rndnu-scalar.c &
+tools/xngen src/qs8-igemm/scalar.c.in -D MR=1 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QC8 -D WASM=0 -o src/qc8-igemm/gen/1x2-minmax-fp32-scalar-lrintf.c &
+tools/xngen src/qs8-igemm/scalar.c.in -D MR=2 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QC8 -D WASM=0 -o src/qc8-igemm/gen/2x2-minmax-fp32-scalar-lrintf.c &
+tools/xngen src/qs8-igemm/scalar.c.in -D MR=3 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QC8 -D WASM=0 -o src/qc8-igemm/gen/3x2-minmax-fp32-scalar-lrintf.c &
+tools/xngen src/qs8-igemm/scalar.c.in -D MR=4 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QC8 -D WASM=0 -o src/qc8-igemm/gen/4x2-minmax-fp32-scalar-lrintf.c &
-tools/xngen src/qs8-igemm/scalar.c.in -D MR=1 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QC8 -D WASM=0 -o src/qc8-igemm/gen/1x2-minmax-fp32-scalar-lrint.c &
-tools/xngen src/qs8-igemm/scalar.c.in -D MR=2 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QC8 -D WASM=0 -o src/qc8-igemm/gen/2x2-minmax-fp32-scalar-lrint.c &
-tools/xngen src/qs8-igemm/scalar.c.in -D MR=3 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QC8 -D WASM=0 -o src/qc8-igemm/gen/3x2-minmax-fp32-scalar-lrint.c &
-tools/xngen src/qs8-igemm/scalar.c.in -D MR=4 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QC8 -D WASM=0 -o src/qc8-igemm/gen/4x2-minmax-fp32-scalar-lrint.c &
+tools/xngen src/qs8-igemm/scalar.c.in -D MR=1 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QS8 -D WASM=0 -o src/qs8-igemm/gen/1x2-minmax-fp32-scalar-lrintf.c &
+tools/xngen src/qs8-igemm/scalar.c.in -D MR=2 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QS8 -D WASM=0 -o src/qs8-igemm/gen/2x2-minmax-fp32-scalar-lrintf.c &
+tools/xngen src/qs8-igemm/scalar.c.in -D MR=3 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QS8 -D WASM=0 -o src/qs8-igemm/gen/3x2-minmax-fp32-scalar-lrintf.c &
+tools/xngen src/qs8-igemm/scalar.c.in -D MR=4 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QS8 -D WASM=0 -o src/qs8-igemm/gen/4x2-minmax-fp32-scalar-lrintf.c &
-tools/xngen src/qs8-igemm/scalar.c.in -D MR=1 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QS8 -D WASM=0 -o src/qs8-igemm/gen/1x2-minmax-fp32-scalar-lrint.c &
-tools/xngen src/qs8-igemm/scalar.c.in -D MR=2 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QS8 -D WASM=0 -o src/qs8-igemm/gen/2x2-minmax-fp32-scalar-lrint.c &
-tools/xngen src/qs8-igemm/scalar.c.in -D MR=3 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QS8 -D WASM=0 -o src/qs8-igemm/gen/3x2-minmax-fp32-scalar-lrint.c &
-tools/xngen src/qs8-igemm/scalar.c.in -D MR=4 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QS8 -D WASM=0 -o src/qs8-igemm/gen/4x2-minmax-fp32-scalar-lrint.c &
+tools/xngen src/qs8-igemm/scalar.c.in -D MR=1 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QU8 -D WASM=0 -o src/qu8-igemm/gen/1x2-minmax-fp32-scalar-lrintf.c &
+tools/xngen src/qs8-igemm/scalar.c.in -D MR=2 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QU8 -D WASM=0 -o src/qu8-igemm/gen/2x2-minmax-fp32-scalar-lrintf.c &
+tools/xngen src/qs8-igemm/scalar.c.in -D MR=3 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QU8 -D WASM=0 -o src/qu8-igemm/gen/3x2-minmax-fp32-scalar-lrintf.c &
+tools/xngen src/qs8-igemm/scalar.c.in -D MR=4 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QU8 -D WASM=0 -o src/qu8-igemm/gen/4x2-minmax-fp32-scalar-lrintf.c &
-tools/xngen src/qs8-igemm/scalar.c.in -D MR=1 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QU8 -D WASM=0 -o src/qu8-igemm/gen/1x2-minmax-fp32-scalar-lrint.c &
-tools/xngen src/qs8-igemm/scalar.c.in -D MR=2 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QU8 -D WASM=0 -o src/qu8-igemm/gen/2x2-minmax-fp32-scalar-lrint.c &
-tools/xngen src/qs8-igemm/scalar.c.in -D MR=3 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QU8 -D WASM=0 -o src/qu8-igemm/gen/3x2-minmax-fp32-scalar-lrint.c &
-tools/xngen src/qs8-igemm/scalar.c.in -D MR=4 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QU8 -D WASM=0 -o src/qu8-igemm/gen/4x2-minmax-fp32-scalar-lrint.c &
+tools/xngen src/qs8-igemm/scalar.c.in -D MR=1 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QC8 -D WASM=0 -o src/qc8-igemm/gen/1x2-minmax-fp32-scalar-fmagic.c &
+tools/xngen src/qs8-igemm/scalar.c.in -D MR=2 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QC8 -D WASM=0 -o src/qc8-igemm/gen/2x2-minmax-fp32-scalar-fmagic.c &
+tools/xngen src/qs8-igemm/scalar.c.in -D MR=3 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QC8 -D WASM=0 -o src/qc8-igemm/gen/3x2-minmax-fp32-scalar-fmagic.c &
+tools/xngen src/qs8-igemm/scalar.c.in -D MR=4 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QC8 -D WASM=0 -o src/qc8-igemm/gen/4x2-minmax-fp32-scalar-fmagic.c &
-tools/xngen src/qs8-igemm/scalar.c.in -D MR=1 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QC8 -D WASM=0 -o src/qc8-igemm/gen/1x2-minmax-fp32-scalar-magic.c &
-tools/xngen src/qs8-igemm/scalar.c.in -D MR=2 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QC8 -D WASM=0 -o src/qc8-igemm/gen/2x2-minmax-fp32-scalar-magic.c &
-tools/xngen src/qs8-igemm/scalar.c.in -D MR=3 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QC8 -D WASM=0 -o src/qc8-igemm/gen/3x2-minmax-fp32-scalar-magic.c &
-tools/xngen src/qs8-igemm/scalar.c.in -D MR=4 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QC8 -D WASM=0 -o src/qc8-igemm/gen/4x2-minmax-fp32-scalar-magic.c &
+tools/xngen src/qs8-igemm/scalar.c.in -D MR=1 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QS8 -D WASM=0 -o src/qs8-igemm/gen/1x2-minmax-fp32-scalar-fmagic.c &
+tools/xngen src/qs8-igemm/scalar.c.in -D MR=2 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QS8 -D WASM=0 -o src/qs8-igemm/gen/2x2-minmax-fp32-scalar-fmagic.c &
+tools/xngen src/qs8-igemm/scalar.c.in -D MR=3 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QS8 -D WASM=0 -o src/qs8-igemm/gen/3x2-minmax-fp32-scalar-fmagic.c &
+tools/xngen src/qs8-igemm/scalar.c.in -D MR=4 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QS8 -D WASM=0 -o src/qs8-igemm/gen/4x2-minmax-fp32-scalar-fmagic.c &
-tools/xngen src/qs8-igemm/scalar.c.in -D MR=1 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QS8 -D WASM=0 -o src/qs8-igemm/gen/1x2-minmax-fp32-scalar-magic.c &
-tools/xngen src/qs8-igemm/scalar.c.in -D MR=2 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QS8 -D WASM=0 -o src/qs8-igemm/gen/2x2-minmax-fp32-scalar-magic.c &
-tools/xngen src/qs8-igemm/scalar.c.in -D MR=3 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QS8 -D WASM=0 -o src/qs8-igemm/gen/3x2-minmax-fp32-scalar-magic.c &
-tools/xngen src/qs8-igemm/scalar.c.in -D MR=4 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QS8 -D WASM=0 -o src/qs8-igemm/gen/4x2-minmax-fp32-scalar-magic.c &
+tools/xngen src/qs8-igemm/scalar.c.in -D MR=1 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QU8 -D WASM=0 -o src/qu8-igemm/gen/1x2-minmax-fp32-scalar-fmagic.c &
+tools/xngen src/qs8-igemm/scalar.c.in -D MR=2 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QU8 -D WASM=0 -o src/qu8-igemm/gen/2x2-minmax-fp32-scalar-fmagic.c &
+tools/xngen src/qs8-igemm/scalar.c.in -D MR=3 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QU8 -D WASM=0 -o src/qu8-igemm/gen/3x2-minmax-fp32-scalar-fmagic.c &
+tools/xngen src/qs8-igemm/scalar.c.in -D MR=4 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QU8 -D WASM=0 -o src/qu8-igemm/gen/4x2-minmax-fp32-scalar-fmagic.c &
-tools/xngen src/qs8-igemm/scalar.c.in -D MR=1 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QU8 -D WASM=0 -o src/qu8-igemm/gen/1x2-minmax-fp32-scalar-magic.c &
-tools/xngen src/qs8-igemm/scalar.c.in -D MR=2 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QU8 -D WASM=0 -o src/qu8-igemm/gen/2x2-minmax-fp32-scalar-magic.c &
-tools/xngen src/qs8-igemm/scalar.c.in -D MR=3 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QU8 -D WASM=0 -o src/qu8-igemm/gen/3x2-minmax-fp32-scalar-magic.c &
-tools/xngen src/qs8-igemm/scalar.c.in -D MR=4 -D NR=2 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QU8 -D WASM=0 -o src/qu8-igemm/gen/4x2-minmax-fp32-scalar-magic.c &
+tools/xngen src/qs8-igemm/scalar.c.in -D MR=1 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QC8 -D WASM=0 -o src/qc8-igemm/gen/1x4-minmax-fp32-scalar-lrintf.c &
+tools/xngen src/qs8-igemm/scalar.c.in -D MR=2 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QC8 -D WASM=0 -o src/qc8-igemm/gen/2x4-minmax-fp32-scalar-lrintf.c &
+tools/xngen src/qs8-igemm/scalar.c.in -D MR=3 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QC8 -D WASM=0 -o src/qc8-igemm/gen/3x4-minmax-fp32-scalar-lrintf.c &
+tools/xngen src/qs8-igemm/scalar.c.in -D MR=4 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QC8 -D WASM=0 -o src/qc8-igemm/gen/4x4-minmax-fp32-scalar-lrintf.c &
-tools/xngen src/qs8-igemm/scalar.c.in -D MR=1 -D NR=4 -D REQUANTIZATION=RNDNU -D VARIANT= -D DATATYPE=QS8 -D WASM=0 -o src/qs8-igemm/gen/1x4-minmax-rndnu-scalar.c &
-tools/xngen src/qs8-igemm/scalar.c.in -D MR=2 -D NR=4 -D REQUANTIZATION=RNDNU -D VARIANT= -D DATATYPE=QS8 -D WASM=0 -o src/qs8-igemm/gen/2x4-minmax-rndnu-scalar.c &
-tools/xngen src/qs8-igemm/scalar.c.in -D MR=3 -D NR=4 -D REQUANTIZATION=RNDNU -D VARIANT= -D DATATYPE=QS8 -D WASM=0 -o src/qs8-igemm/gen/3x4-minmax-rndnu-scalar.c &
-tools/xngen src/qs8-igemm/scalar.c.in -D MR=4 -D NR=4 -D REQUANTIZATION=RNDNU -D VARIANT= -D DATATYPE=QS8 -D WASM=0 -o src/qs8-igemm/gen/4x4-minmax-rndnu-scalar.c &
+tools/xngen src/qs8-igemm/scalar.c.in -D MR=1 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QS8 -D WASM=0 -o src/qs8-igemm/gen/1x4-minmax-fp32-scalar-lrintf.c &
+tools/xngen src/qs8-igemm/scalar.c.in -D MR=2 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QS8 -D WASM=0 -o src/qs8-igemm/gen/2x4-minmax-fp32-scalar-lrintf.c &
+tools/xngen src/qs8-igemm/scalar.c.in -D MR=3 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QS8 -D WASM=0 -o src/qs8-igemm/gen/3x4-minmax-fp32-scalar-lrintf.c &
+tools/xngen src/qs8-igemm/scalar.c.in -D MR=4 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QS8 -D WASM=0 -o src/qs8-igemm/gen/4x4-minmax-fp32-scalar-lrintf.c &
-tools/xngen src/qs8-igemm/scalar.c.in -D MR=1 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QC8 -D WASM=0 -o src/qc8-igemm/gen/1x4-minmax-fp32-scalar-lrint.c &
-tools/xngen src/qs8-igemm/scalar.c.in -D MR=2 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QC8 -D WASM=0 -o src/qc8-igemm/gen/2x4-minmax-fp32-scalar-lrint.c &
-tools/xngen src/qs8-igemm/scalar.c.in -D MR=3 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QC8 -D WASM=0 -o src/qc8-igemm/gen/3x4-minmax-fp32-scalar-lrint.c &
-tools/xngen src/qs8-igemm/scalar.c.in -D MR=4 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QC8 -D WASM=0 -o src/qc8-igemm/gen/4x4-minmax-fp32-scalar-lrint.c &
+tools/xngen src/qs8-igemm/scalar.c.in -D MR=1 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QU8 -D WASM=0 -o src/qu8-igemm/gen/1x4-minmax-fp32-scalar-lrintf.c &
+tools/xngen src/qs8-igemm/scalar.c.in -D MR=2 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QU8 -D WASM=0 -o src/qu8-igemm/gen/2x4-minmax-fp32-scalar-lrintf.c &
+tools/xngen src/qs8-igemm/scalar.c.in -D MR=3 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QU8 -D WASM=0 -o src/qu8-igemm/gen/3x4-minmax-fp32-scalar-lrintf.c &
+tools/xngen src/qs8-igemm/scalar.c.in -D MR=4 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=LRINTF -D DATATYPE=QU8 -D WASM=0 -o src/qu8-igemm/gen/4x4-minmax-fp32-scalar-lrintf.c &
-tools/xngen src/qs8-igemm/scalar.c.in -D MR=1 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QS8 -D WASM=0 -o src/qs8-igemm/gen/1x4-minmax-fp32-scalar-lrint.c &
-tools/xngen src/qs8-igemm/scalar.c.in -D MR=2 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QS8 -D WASM=0 -o src/qs8-igemm/gen/2x4-minmax-fp32-scalar-lrint.c &
-tools/xngen src/qs8-igemm/scalar.c.in -D MR=3 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QS8 -D WASM=0 -o src/qs8-igemm/gen/3x4-minmax-fp32-scalar-lrint.c &
-tools/xngen src/qs8-igemm/scalar.c.in -D MR=4 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QS8 -D WASM=0 -o src/qs8-igemm/gen/4x4-minmax-fp32-scalar-lrint.c &
+tools/xngen src/qs8-igemm/scalar.c.in -D MR=1 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QC8 -D WASM=0 -o src/qc8-igemm/gen/1x4-minmax-fp32-scalar-fmagic.c &
+tools/xngen src/qs8-igemm/scalar.c.in -D MR=2 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QC8 -D WASM=0 -o src/qc8-igemm/gen/2x4-minmax-fp32-scalar-fmagic.c &
+tools/xngen src/qs8-igemm/scalar.c.in -D MR=3 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QC8 -D WASM=0 -o src/qc8-igemm/gen/3x4-minmax-fp32-scalar-fmagic.c &
+tools/xngen src/qs8-igemm/scalar.c.in -D MR=4 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QC8 -D WASM=0 -o src/qc8-igemm/gen/4x4-minmax-fp32-scalar-fmagic.c &
-tools/xngen src/qs8-igemm/scalar.c.in -D MR=1 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QU8 -D WASM=0 -o src/qu8-igemm/gen/1x4-minmax-fp32-scalar-lrint.c &
-tools/xngen src/qs8-igemm/scalar.c.in -D MR=2 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QU8 -D WASM=0 -o src/qu8-igemm/gen/2x4-minmax-fp32-scalar-lrint.c &
-tools/xngen src/qs8-igemm/scalar.c.in -D MR=3 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QU8 -D WASM=0 -o src/qu8-igemm/gen/3x4-minmax-fp32-scalar-lrint.c &
-tools/xngen src/qs8-igemm/scalar.c.in -D MR=4 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=LRINT -D DATATYPE=QU8 -D WASM=0 -o src/qu8-igemm/gen/4x4-minmax-fp32-scalar-lrint.c &
+tools/xngen src/qs8-igemm/scalar.c.in -D MR=1 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QS8 -D WASM=0 -o src/qs8-igemm/gen/1x4-minmax-fp32-scalar-fmagic.c &
+tools/xngen src/qs8-igemm/scalar.c.in -D MR=2 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QS8 -D WASM=0 -o src/qs8-igemm/gen/2x4-minmax-fp32-scalar-fmagic.c &
+tools/xngen src/qs8-igemm/scalar.c.in -D MR=3 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QS8 -D WASM=0 -o src/qs8-igemm/gen/3x4-minmax-fp32-scalar-fmagic.c &
+tools/xngen src/qs8-igemm/scalar.c.in -D MR=4 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QS8 -D WASM=0 -o src/qs8-igemm/gen/4x4-minmax-fp32-scalar-fmagic.c &
-tools/xngen src/qs8-igemm/scalar.c.in -D MR=1 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QC8 -D WASM=0 -o src/qc8-igemm/gen/1x4-minmax-fp32-scalar-magic.c &
-tools/xngen src/qs8-igemm/scalar.c.in -D MR=2 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QC8 -D WASM=0 -o src/qc8-igemm/gen/2x4-minmax-fp32-scalar-magic.c &
-tools/xngen src/qs8-igemm/scalar.c.in -D MR=3 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QC8 -D WASM=0 -o src/qc8-igemm/gen/3x4-minmax-fp32-scalar-magic.c &
-tools/xngen src/qs8-igemm/scalar.c.in -D MR=4 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QC8 -D WASM=0 -o src/qc8-igemm/gen/4x4-minmax-fp32-scalar-magic.c &
-
-tools/xngen src/qs8-igemm/scalar.c.in -D MR=1 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QS8 -D WASM=0 -o src/qs8-igemm/gen/1x4-minmax-fp32-scalar-magic.c &
-tools/xngen src/qs8-igemm/scalar.c.in -D MR=2 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QS8 -D WASM=0 -o src/qs8-igemm/gen/2x4-minmax-fp32-scalar-magic.c &
-tools/xngen src/qs8-igemm/scalar.c.in -D MR=3 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QS8 -D WASM=0 -o src/qs8-igemm/gen/3x4-minmax-fp32-scalar-magic.c &
-tools/xngen src/qs8-igemm/scalar.c.in -D MR=4 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QS8 -D WASM=0 -o src/qs8-igemm/gen/4x4-minmax-fp32-scalar-magic.c &
-
-tools/xngen src/qs8-igemm/scalar.c.in -D MR=1 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QU8 -D WASM=0 -o src/qu8-igemm/gen/1x4-minmax-fp32-scalar-magic.c &
-tools/xngen src/qs8-igemm/scalar.c.in -D MR=2 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QU8 -D WASM=0 -o src/qu8-igemm/gen/2x4-minmax-fp32-scalar-magic.c &
-tools/xngen src/qs8-igemm/scalar.c.in -D MR=3 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QU8 -D WASM=0 -o src/qu8-igemm/gen/3x4-minmax-fp32-scalar-magic.c &
-tools/xngen src/qs8-igemm/scalar.c.in -D MR=4 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=MAGIC -D DATATYPE=QU8 -D WASM=0 -o src/qu8-igemm/gen/4x4-minmax-fp32-scalar-magic.c &
+tools/xngen src/qs8-igemm/scalar.c.in -D MR=1 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QU8 -D WASM=0 -o src/qu8-igemm/gen/1x4-minmax-fp32-scalar-fmagic.c &
+tools/xngen src/qs8-igemm/scalar.c.in -D MR=2 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QU8 -D WASM=0 -o src/qu8-igemm/gen/2x4-minmax-fp32-scalar-fmagic.c &
+tools/xngen src/qs8-igemm/scalar.c.in -D MR=3 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QU8 -D WASM=0 -o src/qu8-igemm/gen/3x4-minmax-fp32-scalar-fmagic.c &
+tools/xngen src/qs8-igemm/scalar.c.in -D MR=4 -D NR=4 -D REQUANTIZATION=FP32 -D VARIANT=FMAGIC -D DATATYPE=QU8 -D WASM=0 -o src/qu8-igemm/gen/4x4-minmax-fp32-scalar-fmagic.c &
################################## WAsm SIMD ##################################
### C2 micro-kernels
diff --git a/src/init.c b/src/init.c
index e2daf71..280fb19 100644
--- a/src/init.c
+++ b/src/init.c
@@ -865,20 +865,20 @@
#ifndef XNN_NO_QS8_OPERATORS
init_flags |= XNN_INIT_FLAG_QS8;
- xnn_params.qs8.gemm.minmax.gemm = xnn_init_hmp_gemm_ukernel((xnn_gemm_ukernel_function) xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_magic);
- xnn_params.qs8.gemm.minmax.igemm = xnn_init_hmp_igemm_ukernel((xnn_igemm_ukernel_function) xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_magic);
- xnn_params.qs8.gemm.minmax.gemm1 = xnn_init_hmp_gemm_ukernel((xnn_gemm_ukernel_function) xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_magic);
- xnn_params.qs8.gemm.minmax.igemm1 = xnn_init_hmp_igemm_ukernel((xnn_igemm_ukernel_function) xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_magic);
- xnn_params.qs8.gemm.init.qs8 = xnn_init_qs8_conv_minmax_fp32_scalar_magic_params;
+ xnn_params.qs8.gemm.minmax.gemm = xnn_init_hmp_gemm_ukernel((xnn_gemm_ukernel_function) xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic);
+ xnn_params.qs8.gemm.minmax.igemm = xnn_init_hmp_igemm_ukernel((xnn_igemm_ukernel_function) xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic);
+ xnn_params.qs8.gemm.minmax.gemm1 = xnn_init_hmp_gemm_ukernel((xnn_gemm_ukernel_function) xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic);
+ xnn_params.qs8.gemm.minmax.igemm1 = xnn_init_hmp_igemm_ukernel((xnn_igemm_ukernel_function) xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic);
+ xnn_params.qs8.gemm.init.qs8 = xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params;
xnn_params.qs8.gemm.mr = 2;
xnn_params.qs8.gemm.nr = 2;
- xnn_params.qs8.dwconv[0].minmax.unipass = (xnn_dwconv_unipass_ukernel_function) xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_magic;
- xnn_params.qs8.dwconv[0].init.qs8 = xnn_init_qs8_conv_minmax_fp32_scalar_magic_params;
+ xnn_params.qs8.dwconv[0].minmax.unipass = (xnn_dwconv_unipass_ukernel_function) xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_fmagic;
+ xnn_params.qs8.dwconv[0].init.qs8 = xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params;
xnn_params.qs8.dwconv[0].channel_tile = 1;
xnn_params.qs8.dwconv[0].primary_tile = 9;
- xnn_params.qs8.dwconv[1].minmax.unipass = (xnn_dwconv_unipass_ukernel_function) xnn_qs8_dwconv_minmax_fp32_ukernel_up1x25__scalar_magic;
- xnn_params.qs8.dwconv[1].init.qs8 = xnn_init_qs8_conv_minmax_fp32_scalar_magic_params;
+ xnn_params.qs8.dwconv[1].minmax.unipass = (xnn_dwconv_unipass_ukernel_function) xnn_qs8_dwconv_minmax_fp32_ukernel_up1x25__scalar_fmagic;
+ xnn_params.qs8.dwconv[1].init.qs8 = xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params;
xnn_params.qs8.dwconv[1].channel_tile = 1;
xnn_params.qs8.dwconv[1].primary_tile = 25;
@@ -907,20 +907,20 @@
#ifndef XNN_NO_QU8_OPERATORS
init_flags |= XNN_INIT_FLAG_QU8;
- xnn_params.qu8.gemm.minmax.gemm = xnn_init_hmp_gemm_ukernel((xnn_gemm_ukernel_function) xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_magic);
- xnn_params.qu8.gemm.minmax.igemm = xnn_init_hmp_igemm_ukernel((xnn_igemm_ukernel_function) xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_magic);
- xnn_params.qu8.gemm.minmax.gemm1 = xnn_init_hmp_gemm_ukernel((xnn_gemm_ukernel_function) xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_magic);
- xnn_params.qu8.gemm.minmax.igemm1 = xnn_init_hmp_igemm_ukernel((xnn_igemm_ukernel_function) xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_magic);
- xnn_params.qu8.gemm.init.qu8 = xnn_init_qu8_conv_minmax_fp32_scalar_magic_params;
+ xnn_params.qu8.gemm.minmax.gemm = xnn_init_hmp_gemm_ukernel((xnn_gemm_ukernel_function) xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic);
+ xnn_params.qu8.gemm.minmax.igemm = xnn_init_hmp_igemm_ukernel((xnn_igemm_ukernel_function) xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic);
+ xnn_params.qu8.gemm.minmax.gemm1 = xnn_init_hmp_gemm_ukernel((xnn_gemm_ukernel_function) xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic);
+ xnn_params.qu8.gemm.minmax.igemm1 = xnn_init_hmp_igemm_ukernel((xnn_igemm_ukernel_function) xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic);
+ xnn_params.qu8.gemm.init.qu8 = xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params;
xnn_params.qu8.gemm.mr = 2;
xnn_params.qu8.gemm.nr = 2;
- xnn_params.qu8.dwconv[0].minmax.unipass = (xnn_dwconv_unipass_ukernel_function) xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_magic;
- xnn_params.qu8.dwconv[0].init.qu8 = xnn_init_qu8_conv_minmax_fp32_scalar_magic_params;
+ xnn_params.qu8.dwconv[0].minmax.unipass = (xnn_dwconv_unipass_ukernel_function) xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_fmagic;
+ xnn_params.qu8.dwconv[0].init.qu8 = xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params;
xnn_params.qu8.dwconv[0].channel_tile = 1;
xnn_params.qu8.dwconv[0].primary_tile = 9;
- xnn_params.qu8.dwconv[1].minmax.unipass = (xnn_dwconv_unipass_ukernel_function) xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_magic;
- xnn_params.qu8.dwconv[1].init.qu8 = xnn_init_qu8_conv_minmax_fp32_scalar_magic_params;
+ xnn_params.qu8.dwconv[1].minmax.unipass = (xnn_dwconv_unipass_ukernel_function) xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_fmagic;
+ xnn_params.qu8.dwconv[1].init.qu8 = xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params;
xnn_params.qu8.dwconv[1].channel_tile = 1;
xnn_params.qu8.dwconv[1].primary_tile = 25;
@@ -4934,29 +4934,29 @@
init_flags |= XNN_INIT_FLAG_QC8;
if (is_wasm_x86) {
- xnn_params.qc8.gemm.minmax.gemm = xnn_init_hmp_gemm_ukernel((xnn_gemm_ukernel_function) xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_magic);
- xnn_params.qc8.gemm.minmax.igemm = xnn_init_hmp_igemm_ukernel((xnn_igemm_ukernel_function) xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_magic);
- xnn_params.qc8.gemm.minmax.gemm1 = xnn_init_hmp_gemm_ukernel((xnn_gemm_ukernel_function) xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_magic);
- xnn_params.qc8.gemm.minmax.igemm1 = xnn_init_hmp_igemm_ukernel((xnn_igemm_ukernel_function) xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_magic);
- xnn_params.qc8.gemm.init.qc8 = xnn_init_qs8_minmax_scalar_magic_params;
+ xnn_params.qc8.gemm.minmax.gemm = xnn_init_hmp_gemm_ukernel((xnn_gemm_ukernel_function) xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic);
+ xnn_params.qc8.gemm.minmax.igemm = xnn_init_hmp_igemm_ukernel((xnn_igemm_ukernel_function) xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic);
+ xnn_params.qc8.gemm.minmax.gemm1 = xnn_init_hmp_gemm_ukernel((xnn_gemm_ukernel_function) xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic);
+ xnn_params.qc8.gemm.minmax.igemm1 = xnn_init_hmp_igemm_ukernel((xnn_igemm_ukernel_function) xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic);
+ xnn_params.qc8.gemm.init.qc8 = xnn_init_qs8_minmax_scalar_fmagic_params;
xnn_params.qc8.gemm.mr = 2;
xnn_params.qc8.gemm.nr = 2;
} else {
- xnn_params.qc8.gemm.minmax.gemm = xnn_init_hmp_gemm_ukernel((xnn_gemm_ukernel_function) xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_magic);
- xnn_params.qc8.gemm.minmax.igemm = xnn_init_hmp_igemm_ukernel((xnn_igemm_ukernel_function) xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_magic);
- xnn_params.qc8.gemm.minmax.gemm1 = xnn_init_hmp_gemm_ukernel((xnn_gemm_ukernel_function) xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_magic);
- xnn_params.qc8.gemm.minmax.igemm1 = xnn_init_hmp_igemm_ukernel((xnn_igemm_ukernel_function) xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_magic);
- xnn_params.qc8.gemm.init.qc8 = xnn_init_qs8_minmax_scalar_magic_params;
+ xnn_params.qc8.gemm.minmax.gemm = xnn_init_hmp_gemm_ukernel((xnn_gemm_ukernel_function) xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic);
+ xnn_params.qc8.gemm.minmax.igemm = xnn_init_hmp_igemm_ukernel((xnn_igemm_ukernel_function) xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic);
+ xnn_params.qc8.gemm.minmax.gemm1 = xnn_init_hmp_gemm_ukernel((xnn_gemm_ukernel_function) xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic);
+ xnn_params.qc8.gemm.minmax.igemm1 = xnn_init_hmp_igemm_ukernel((xnn_igemm_ukernel_function) xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic);
+ xnn_params.qc8.gemm.init.qc8 = xnn_init_qs8_minmax_scalar_fmagic_params;
xnn_params.qc8.gemm.mr = 4;
xnn_params.qc8.gemm.nr = 4;
}
- xnn_params.qc8.dwconv[0].minmax.unipass = (xnn_dwconv_unipass_ukernel_function) xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_magic;
- xnn_params.qc8.dwconv[0].init.qc8 = xnn_init_qs8_minmax_scalar_magic_params;
+ xnn_params.qc8.dwconv[0].minmax.unipass = (xnn_dwconv_unipass_ukernel_function) xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic;
+ xnn_params.qc8.dwconv[0].init.qc8 = xnn_init_qs8_minmax_scalar_fmagic_params;
xnn_params.qc8.dwconv[0].channel_tile = 2;
xnn_params.qc8.dwconv[0].primary_tile = 9;
- xnn_params.qc8.dwconv[1].minmax.unipass = (xnn_dwconv_unipass_ukernel_function) xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_magic;
- xnn_params.qc8.dwconv[1].init.qc8 = xnn_init_qs8_minmax_scalar_magic_params;
+ xnn_params.qc8.dwconv[1].minmax.unipass = (xnn_dwconv_unipass_ukernel_function) xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_fmagic;
+ xnn_params.qc8.dwconv[1].init.qc8 = xnn_init_qs8_minmax_scalar_fmagic_params;
xnn_params.qc8.dwconv[1].channel_tile = 2;
xnn_params.qc8.dwconv[1].primary_tile = 25;
#endif // XNN_NO_QC8_OPERATORS
@@ -4966,29 +4966,29 @@
init_flags |= XNN_INIT_FLAG_QS8;
if (is_wasm_x86) {
- xnn_params.qs8.gemm.minmax.gemm = xnn_init_hmp_gemm_ukernel((xnn_gemm_ukernel_function) xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_magic);
- xnn_params.qs8.gemm.minmax.igemm = xnn_init_hmp_igemm_ukernel((xnn_igemm_ukernel_function) xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_magic);
- xnn_params.qs8.gemm.minmax.gemm1 = xnn_init_hmp_gemm_ukernel((xnn_gemm_ukernel_function) xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_magic);
- xnn_params.qs8.gemm.minmax.igemm1 = xnn_init_hmp_igemm_ukernel((xnn_igemm_ukernel_function) xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_magic);
- xnn_params.qs8.gemm.init.qs8 = xnn_init_qs8_conv_minmax_fp32_scalar_magic_params;
+ xnn_params.qs8.gemm.minmax.gemm = xnn_init_hmp_gemm_ukernel((xnn_gemm_ukernel_function) xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic);
+ xnn_params.qs8.gemm.minmax.igemm = xnn_init_hmp_igemm_ukernel((xnn_igemm_ukernel_function) xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic);
+ xnn_params.qs8.gemm.minmax.gemm1 = xnn_init_hmp_gemm_ukernel((xnn_gemm_ukernel_function) xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic);
+ xnn_params.qs8.gemm.minmax.igemm1 = xnn_init_hmp_igemm_ukernel((xnn_igemm_ukernel_function) xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic);
+ xnn_params.qs8.gemm.init.qs8 = xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params;
xnn_params.qs8.gemm.mr = 2;
xnn_params.qs8.gemm.nr = 2;
} else {
- xnn_params.qs8.gemm.minmax.gemm = xnn_init_hmp_gemm_ukernel((xnn_gemm_ukernel_function) xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_magic);
- xnn_params.qs8.gemm.minmax.igemm = xnn_init_hmp_igemm_ukernel((xnn_igemm_ukernel_function) xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_magic);
- xnn_params.qs8.gemm.minmax.gemm1 = xnn_init_hmp_gemm_ukernel((xnn_gemm_ukernel_function) xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_magic);
- xnn_params.qs8.gemm.minmax.igemm1 = xnn_init_hmp_igemm_ukernel((xnn_igemm_ukernel_function) xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_magic);
- xnn_params.qs8.gemm.init.qs8 = xnn_init_qs8_conv_minmax_fp32_scalar_magic_params;
+ xnn_params.qs8.gemm.minmax.gemm = xnn_init_hmp_gemm_ukernel((xnn_gemm_ukernel_function) xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic);
+ xnn_params.qs8.gemm.minmax.igemm = xnn_init_hmp_igemm_ukernel((xnn_igemm_ukernel_function) xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic);
+ xnn_params.qs8.gemm.minmax.gemm1 = xnn_init_hmp_gemm_ukernel((xnn_gemm_ukernel_function) xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic);
+ xnn_params.qs8.gemm.minmax.igemm1 = xnn_init_hmp_igemm_ukernel((xnn_igemm_ukernel_function) xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic);
+ xnn_params.qs8.gemm.init.qs8 = xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params;
xnn_params.qs8.gemm.mr = 4;
xnn_params.qs8.gemm.nr = 4;
}
- xnn_params.qs8.dwconv[0].minmax.unipass = (xnn_dwconv_unipass_ukernel_function) xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_magic;
- xnn_params.qs8.dwconv[0].init.qs8 = xnn_init_qs8_conv_minmax_fp32_scalar_magic_params;
+ xnn_params.qs8.dwconv[0].minmax.unipass = (xnn_dwconv_unipass_ukernel_function) xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic;
+ xnn_params.qs8.dwconv[0].init.qs8 = xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params;
xnn_params.qs8.dwconv[0].channel_tile = 2;
xnn_params.qs8.dwconv[0].primary_tile = 9;
- xnn_params.qs8.dwconv[1].minmax.unipass = (xnn_dwconv_unipass_ukernel_function) xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_magic;
- xnn_params.qs8.dwconv[1].init.qs8 = xnn_init_qs8_conv_minmax_fp32_scalar_magic_params;
+ xnn_params.qs8.dwconv[1].minmax.unipass = (xnn_dwconv_unipass_ukernel_function) xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_fmagic;
+ xnn_params.qs8.dwconv[1].init.qs8 = xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params;
xnn_params.qs8.dwconv[1].channel_tile = 2;
xnn_params.qs8.dwconv[1].primary_tile = 25;
@@ -5019,29 +5019,29 @@
init_flags |= XNN_INIT_FLAG_QU8;
if (is_wasm_x86) {
- xnn_params.qu8.gemm.minmax.gemm = xnn_init_hmp_gemm_ukernel((xnn_gemm_ukernel_function) xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_magic);
- xnn_params.qu8.gemm.minmax.igemm = xnn_init_hmp_igemm_ukernel((xnn_igemm_ukernel_function) xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_magic);
- xnn_params.qu8.gemm.minmax.gemm1 = xnn_init_hmp_gemm_ukernel((xnn_gemm_ukernel_function) xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_magic);
- xnn_params.qu8.gemm.minmax.igemm1 = xnn_init_hmp_igemm_ukernel((xnn_igemm_ukernel_function) xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_magic);
- xnn_params.qu8.gemm.init.qu8 = xnn_init_qu8_conv_minmax_fp32_scalar_magic_params;
+ xnn_params.qu8.gemm.minmax.gemm = xnn_init_hmp_gemm_ukernel((xnn_gemm_ukernel_function) xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic);
+ xnn_params.qu8.gemm.minmax.igemm = xnn_init_hmp_igemm_ukernel((xnn_igemm_ukernel_function) xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic);
+ xnn_params.qu8.gemm.minmax.gemm1 = xnn_init_hmp_gemm_ukernel((xnn_gemm_ukernel_function) xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic);
+ xnn_params.qu8.gemm.minmax.igemm1 = xnn_init_hmp_igemm_ukernel((xnn_igemm_ukernel_function) xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic);
+ xnn_params.qu8.gemm.init.qu8 = xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params;
xnn_params.qu8.gemm.mr = 2;
xnn_params.qu8.gemm.nr = 2;
} else {
- xnn_params.qu8.gemm.minmax.gemm = xnn_init_hmp_gemm_ukernel((xnn_gemm_ukernel_function) xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_magic);
- xnn_params.qu8.gemm.minmax.igemm = xnn_init_hmp_igemm_ukernel((xnn_igemm_ukernel_function) xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_magic);
- xnn_params.qu8.gemm.minmax.gemm1 = xnn_init_hmp_gemm_ukernel((xnn_gemm_ukernel_function) xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_magic);
- xnn_params.qu8.gemm.minmax.igemm1 = xnn_init_hmp_igemm_ukernel((xnn_igemm_ukernel_function) xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_magic);
- xnn_params.qu8.gemm.init.qu8 = xnn_init_qu8_conv_minmax_fp32_scalar_magic_params;
+ xnn_params.qu8.gemm.minmax.gemm = xnn_init_hmp_gemm_ukernel((xnn_gemm_ukernel_function) xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic);
+ xnn_params.qu8.gemm.minmax.igemm = xnn_init_hmp_igemm_ukernel((xnn_igemm_ukernel_function) xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic);
+ xnn_params.qu8.gemm.minmax.gemm1 = xnn_init_hmp_gemm_ukernel((xnn_gemm_ukernel_function) xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic);
+ xnn_params.qu8.gemm.minmax.igemm1 = xnn_init_hmp_igemm_ukernel((xnn_igemm_ukernel_function) xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic);
+ xnn_params.qu8.gemm.init.qu8 = xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params;
xnn_params.qu8.gemm.mr = 4;
xnn_params.qu8.gemm.nr = 4;
}
- xnn_params.qu8.dwconv[0].minmax.unipass = (xnn_dwconv_unipass_ukernel_function) xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_magic;
- xnn_params.qu8.dwconv[0].init.qu8 = xnn_init_qu8_conv_minmax_fp32_scalar_magic_params;
+ xnn_params.qu8.dwconv[0].minmax.unipass = (xnn_dwconv_unipass_ukernel_function) xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic;
+ xnn_params.qu8.dwconv[0].init.qu8 = xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params;
xnn_params.qu8.dwconv[0].channel_tile = 2;
xnn_params.qu8.dwconv[0].primary_tile = 9;
- xnn_params.qu8.dwconv[1].minmax.unipass = (xnn_dwconv_unipass_ukernel_function) xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_magic;
- xnn_params.qu8.dwconv[1].init.qu8 = xnn_init_qu8_conv_minmax_fp32_scalar_magic_params;
+ xnn_params.qu8.dwconv[1].minmax.unipass = (xnn_dwconv_unipass_ukernel_function) xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_fmagic;
+ xnn_params.qu8.dwconv[1].init.qu8 = xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params;
xnn_params.qu8.dwconv[1].channel_tile = 2;
xnn_params.qu8.dwconv[1].primary_tile = 25;
@@ -5542,12 +5542,12 @@
xnn_params.qs8.gemm.mr = 3;
xnn_params.qs8.gemm.nr = 4;
- xnn_params.qs8.dwconv[0].minmax.unipass = (xnn_dwconv_unipass_ukernel_function) xnn_qs8_dwconv_minmax_gemmlowp_ukernel_up2x9__scalar;
- xnn_params.qs8.dwconv[0].init.qs8 = xnn_init_qs8_conv_minmax_gemmlowp_scalar_params;
+ xnn_params.qs8.dwconv[0].minmax.unipass = (xnn_dwconv_unipass_ukernel_function) xnn_qs8_dwconv_minmax_rndnu_ukernel_up2x9__scalar;
+ xnn_params.qs8.dwconv[0].init.qs8 = xnn_init_qs8_conv_minmax_rndnu_scalar_params;
xnn_params.qs8.dwconv[0].channel_tile = 2;
xnn_params.qs8.dwconv[0].primary_tile = 9;
- xnn_params.qs8.dwconv[1].minmax.unipass = (xnn_dwconv_unipass_ukernel_function) xnn_qs8_dwconv_minmax_gemmlowp_ukernel_up2x25__scalar;
- xnn_params.qs8.dwconv[1].init.qs8 = xnn_init_qs8_conv_minmax_gemmlowp_scalar_params;
+ xnn_params.qs8.dwconv[1].minmax.unipass = (xnn_dwconv_unipass_ukernel_function) xnn_qs8_dwconv_minmax_rndnu_ukernel_up2x25__scalar;
+ xnn_params.qs8.dwconv[1].init.qs8 = xnn_init_qs8_conv_minmax_rndnu_scalar_params;
xnn_params.qs8.dwconv[1].channel_tile = 2;
xnn_params.qs8.dwconv[1].primary_tile = 25;
diff --git a/src/params-init.c b/src/params-init.c
index 9bee350..e28ae17 100644
--- a/src/params-init.c
+++ b/src/params-init.c
@@ -14,7 +14,7 @@
#include <xnnpack/params-init.h>
-void xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params(
+void xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params(
union xnn_qu8_conv_minmax_params params[XNN_MIN_ELEMENTS(1)],
uint8_t kernel_zero_point,
float scale,
@@ -25,14 +25,14 @@
assert(scale >= 0x1.0p-32f);
assert(scale < 256.0f);
- params->fp32_scalar_lrint.kernel_zero_point = (int32_t) (uint32_t) kernel_zero_point;
- params->fp32_scalar_lrint.scale = scale;
- params->fp32_scalar_lrint.output_min_less_zero_point = (long) (int32_t) ((uint32_t) output_min - (uint32_t) output_zero_point);
- params->fp32_scalar_lrint.output_max_less_zero_point = (long) (int32_t) ((uint32_t) output_max - (uint32_t) output_zero_point);
- params->fp32_scalar_lrint.output_zero_point = (int32_t) (uint32_t) output_zero_point;
+ params->fp32_scalar_lrintf.kernel_zero_point = (int32_t) (uint32_t) kernel_zero_point;
+ params->fp32_scalar_lrintf.scale = scale;
+ params->fp32_scalar_lrintf.output_min_less_zero_point = (float) (int32_t) ((uint32_t) output_min - (uint32_t) output_zero_point);
+ params->fp32_scalar_lrintf.output_max_less_zero_point = (float) (int32_t) ((uint32_t) output_max - (uint32_t) output_zero_point);
+ params->fp32_scalar_lrintf.output_zero_point = (int32_t) (uint32_t) output_zero_point;
}
-void xnn_init_qu8_conv_minmax_fp32_scalar_magic_params(
+void xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params(
union xnn_qu8_conv_minmax_params params[XNN_MIN_ELEMENTS(1)],
uint8_t kernel_zero_point,
float scale,
@@ -43,12 +43,12 @@
assert(scale >= 0x1.0p-32f);
assert(scale < 256.0f);
- params->fp32_scalar_magic.kernel_zero_point = (int32_t) (uint32_t) kernel_zero_point;
- params->fp32_scalar_magic.scale = scale;
- params->fp32_scalar_magic.output_min_less_zero_point = (float) (int32_t) ((uint32_t) output_min - (uint32_t) output_zero_point);
- params->fp32_scalar_magic.output_max_less_zero_point = (float) (int32_t) ((uint32_t) output_max - (uint32_t) output_zero_point);
- params->fp32_scalar_magic.magic_bias = 12582912.0f;
- params->fp32_scalar_magic.magic_bias_less_output_zero_point = INT32_C(0x4B400000) - (int32_t) (uint32_t) output_zero_point;
+ params->fp32_scalar_fmagic.kernel_zero_point = (int32_t) (uint32_t) kernel_zero_point;
+ params->fp32_scalar_fmagic.scale = scale;
+ params->fp32_scalar_fmagic.output_min_less_zero_point = (float) (int32_t) ((uint32_t) output_min - (uint32_t) output_zero_point);
+ params->fp32_scalar_fmagic.output_max_less_zero_point = (float) (int32_t) ((uint32_t) output_max - (uint32_t) output_zero_point);
+ params->fp32_scalar_fmagic.magic_bias = 12582912.0f;
+ params->fp32_scalar_fmagic.magic_bias_less_output_zero_point = INT32_C(0x4B400000) - (int32_t) (uint32_t) output_zero_point;
}
#if XNN_ARCH_X86 || XNN_ARCH_X86_64
@@ -243,7 +243,7 @@
}
#endif // XNN_ARCH_WASMSIMD || XNN_ARCH_WASMRELAXEDSIMD
-void xnn_init_qs8_conv_minmax_rndnu_scalar_params(
+void xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params(
union xnn_qs8_conv_minmax_params params[XNN_MIN_ELEMENTS(1)],
float scale,
int8_t output_zero_point,
@@ -253,30 +253,13 @@
assert(scale >= 0x1.0p-32f);
assert(scale < 256.0f);
- // Compute requantization parameters.
- const uint32_t scale_bits = fp32_to_bits(scale);
-
- // Multiplier is in [0x00800000, 0x00FFFFFF] range.
- const int32_t multiplier = ((int32_t) scale_bits & INT32_C(0x007FFFFF)) | INT32_C(0x00800000);
- assert(multiplier >= INT32_C(0x00800000));
- assert(multiplier <= INT32_C(0x00FFFFFF));
-
- // Shift is in [16, 56] range.
- const uint32_t shift = 127 + 23 - (scale_bits >> 23);
- assert(shift >= 16);
- assert(shift < 56);
- const int64_t rounding = INT64_C(1) << (shift - 1);
-
- params->rndnu_scalar.multiplier = multiplier;
- params->rndnu_scalar.shift = shift;
- params->rndnu_scalar.rounding = rounding;
- params->rndnu_scalar.output_min_less_zero_point = (int32_t) output_min - (int32_t) output_zero_point;
- params->rndnu_scalar.output_max_less_zero_point = (int32_t) output_max - (int32_t) output_zero_point;
- params->rndnu_scalar.output_zero_point = (int32_t) output_zero_point;
+ params->fp32_scalar_lrintf.scale = scale;
+ params->fp32_scalar_lrintf.output_min_less_zero_point = (float) ((int32_t) output_min - (int32_t) output_zero_point);
+ params->fp32_scalar_lrintf.output_max_less_zero_point = (float) ((int32_t) output_max - (int32_t) output_zero_point);
+ params->fp32_scalar_lrintf.output_zero_point = (int32_t) output_zero_point;
}
-
-void xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params(
+void xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params(
union xnn_qs8_conv_minmax_params params[XNN_MIN_ELEMENTS(1)],
float scale,
int8_t output_zero_point,
@@ -286,27 +269,11 @@
assert(scale >= 0x1.0p-32f);
assert(scale < 256.0f);
- params->fp32_scalar_lrint.scale = scale;
- params->fp32_scalar_lrint.output_min_less_zero_point = (long) ((int32_t) output_min - (int32_t) output_zero_point);
- params->fp32_scalar_lrint.output_max_less_zero_point = (long) ((int32_t) output_max - (int32_t) output_zero_point);
- params->fp32_scalar_lrint.output_zero_point = (int32_t) output_zero_point;
-}
-
-void xnn_init_qs8_conv_minmax_fp32_scalar_magic_params(
- union xnn_qs8_conv_minmax_params params[XNN_MIN_ELEMENTS(1)],
- float scale,
- int8_t output_zero_point,
- int8_t output_min,
- int8_t output_max)
-{
- assert(scale >= 0x1.0p-32f);
- assert(scale < 256.0f);
-
- params->fp32_scalar_magic.scale = scale;
- params->fp32_scalar_magic.output_min_less_zero_point = (float) ((int32_t) output_min - (int32_t) output_zero_point);
- params->fp32_scalar_magic.output_max_less_zero_point = (float) ((int32_t) output_max - (int32_t) output_zero_point);
- params->fp32_scalar_magic.magic_bias = 12582912.0f;
- params->fp32_scalar_magic.magic_bias_less_output_zero_point = INT32_C(0x4B400000) - (int32_t) output_zero_point;
+ params->fp32_scalar_fmagic.scale = scale;
+ params->fp32_scalar_fmagic.output_min_less_zero_point = (float) ((int32_t) output_min - (int32_t) output_zero_point);
+ params->fp32_scalar_fmagic.output_max_less_zero_point = (float) ((int32_t) output_max - (int32_t) output_zero_point);
+ params->fp32_scalar_fmagic.magic_bias = 12582912.0f;
+ params->fp32_scalar_fmagic.magic_bias_less_output_zero_point = INT32_C(0x4B400000) - (int32_t) output_zero_point;
}
#if XNN_ARCH_X86 || XNN_ARCH_X86_64
@@ -513,27 +480,27 @@
}
}
-void xnn_init_qs8_minmax_scalar_lrint_params(
+void xnn_init_qs8_minmax_scalar_lrintf_params(
union xnn_qs8_minmax_params params[XNN_MIN_ELEMENTS(1)],
int8_t output_zero_point,
int8_t output_min,
int8_t output_max)
{
- params->scalar_lrint.output_min_less_zero_point = (long) ((int32_t) output_min - (int32_t) output_zero_point);
- params->scalar_lrint.output_max_less_zero_point = (long) ((int32_t) output_max - (int32_t) output_zero_point);
- params->scalar_lrint.output_zero_point = (int32_t) output_zero_point;
+ params->scalar_lrintf.output_min_less_zero_point = (float) ((int32_t) output_min - (int32_t) output_zero_point);
+ params->scalar_lrintf.output_max_less_zero_point = (float) ((int32_t) output_max - (int32_t) output_zero_point);
+ params->scalar_lrintf.output_zero_point = (int32_t) output_zero_point;
}
-void xnn_init_qs8_minmax_scalar_magic_params(
+void xnn_init_qs8_minmax_scalar_fmagic_params(
union xnn_qs8_minmax_params params[XNN_MIN_ELEMENTS(1)],
int8_t output_zero_point,
int8_t output_min,
int8_t output_max)
{
- params->scalar_magic.output_min_less_zero_point = (float) ((int32_t) output_min - (int32_t) output_zero_point);
- params->scalar_magic.output_max_less_zero_point = (float) ((int32_t) output_max - (int32_t) output_zero_point);
- params->scalar_magic.magic_bias = 12582912.0f;
- params->scalar_magic.magic_bias_less_output_zero_point = INT32_C(0x4B400000) - (int32_t) output_zero_point;
+ params->scalar_fmagic.output_min_less_zero_point = (float) ((int32_t) output_min - (int32_t) output_zero_point);
+ params->scalar_fmagic.output_max_less_zero_point = (float) ((int32_t) output_max - (int32_t) output_zero_point);
+ params->scalar_fmagic.magic_bias = 12582912.0f;
+ params->scalar_fmagic.magic_bias_less_output_zero_point = INT32_C(0x4B400000) - (int32_t) output_zero_point;
}
#if XNN_ARCH_X86 || XNN_ARCH_X86_64
diff --git a/src/qc8-dwconv/gen/up1x25-minmax-fp32-scalar-lrint.c b/src/qc8-dwconv/gen/up1x25-minmax-fp32-scalar-fmagic.c
similarity index 92%
copy from src/qc8-dwconv/gen/up1x25-minmax-fp32-scalar-lrint.c
copy to src/qc8-dwconv/gen/up1x25-minmax-fp32-scalar-fmagic.c
index 229cebc..14a9bc9 100644
--- a/src/qc8-dwconv/gen/up1x25-minmax-fp32-scalar-lrint.c
+++ b/src/qc8-dwconv/gen/up1x25-minmax-fp32-scalar-fmagic.c
@@ -8,13 +8,14 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-#include <math.h>
+
+#include <fp16.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
-void xnn_qc8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrint(
+void xnn_qc8_dwconv_minmax_fp32_ukernel_up1x25__scalar_fmagic(
size_t channels,
size_t output_width,
const int8_t** input,
@@ -29,9 +30,10 @@
assert(channels != 0);
assert(output_width != 0);
- const long voutput_min_less_zero_point = params->scalar_lrint.output_min_less_zero_point;
- const long voutput_max_less_zero_point = params->scalar_lrint.output_max_less_zero_point;
- const int32_t voutput_zero_point = params->scalar_lrint.output_zero_point;
+ const float voutput_min_less_zero_point = params->scalar_fmagic.output_min_less_zero_point;
+ const float voutput_max_less_zero_point = params->scalar_fmagic.output_max_less_zero_point;
+ const float vmagic_bias = params->scalar_fmagic.magic_bias;
+ const int32_t vmagic_bias_less_output_zero_point = params->scalar_fmagic.magic_bias_less_output_zero_point;
do {
const int8_t* i0 = input[0];
assert(i0 != NULL);
@@ -246,11 +248,11 @@
typedef XNN_UNALIGNED float unaligned_float;
const float vscale = *((const unaligned_float*) w);
w = (const void*) ((const float*) w + 1);
- const float vfpacc = (float) vacc * vscale;
- long vrndacc = lrintf(vfpacc);
- vrndacc = XNN_UNPREDICTABLE(vrndacc < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc;
- vrndacc = XNN_UNPREDICTABLE(vrndacc > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc;
- int32_t vout = (int32_t) vrndacc + voutput_zero_point;
+ float vfpacc = (float) vacc * vscale;
+ vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
+ vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
+ vfpacc += vmagic_bias;
+ int32_t vout = (int32_t) fp32_to_bits(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (int8_t) vout;
} while (--c != 0);
diff --git a/src/qc8-dwconv/gen/up1x25-minmax-fp32-scalar-lrint.c b/src/qc8-dwconv/gen/up1x25-minmax-fp32-scalar-lrintf.c
similarity index 96%
rename from src/qc8-dwconv/gen/up1x25-minmax-fp32-scalar-lrint.c
rename to src/qc8-dwconv/gen/up1x25-minmax-fp32-scalar-lrintf.c
index 229cebc..97d3553 100644
--- a/src/qc8-dwconv/gen/up1x25-minmax-fp32-scalar-lrint.c
+++ b/src/qc8-dwconv/gen/up1x25-minmax-fp32-scalar-lrintf.c
@@ -14,7 +14,7 @@
#include <xnnpack/math.h>
-void xnn_qc8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrint(
+void xnn_qc8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrintf(
size_t channels,
size_t output_width,
const int8_t** input,
@@ -29,9 +29,9 @@
assert(channels != 0);
assert(output_width != 0);
- const long voutput_min_less_zero_point = params->scalar_lrint.output_min_less_zero_point;
- const long voutput_max_less_zero_point = params->scalar_lrint.output_max_less_zero_point;
- const int32_t voutput_zero_point = params->scalar_lrint.output_zero_point;
+ const float voutput_min_less_zero_point = params->scalar_lrintf.output_min_less_zero_point;
+ const float voutput_max_less_zero_point = params->scalar_lrintf.output_max_less_zero_point;
+ const int32_t voutput_zero_point = params->scalar_lrintf.output_zero_point;
do {
const int8_t* i0 = input[0];
assert(i0 != NULL);
diff --git a/src/qc8-dwconv/gen/up1x9-minmax-fp32-scalar-lrint.c b/src/qc8-dwconv/gen/up1x9-minmax-fp32-scalar-fmagic.c
similarity index 84%
copy from src/qc8-dwconv/gen/up1x9-minmax-fp32-scalar-lrint.c
copy to src/qc8-dwconv/gen/up1x9-minmax-fp32-scalar-fmagic.c
index 9439744..863e8f0 100644
--- a/src/qc8-dwconv/gen/up1x9-minmax-fp32-scalar-lrint.c
+++ b/src/qc8-dwconv/gen/up1x9-minmax-fp32-scalar-fmagic.c
@@ -8,13 +8,14 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-#include <math.h>
+
+#include <fp16.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
-void xnn_qc8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrint(
+void xnn_qc8_dwconv_minmax_fp32_ukernel_up1x9__scalar_fmagic(
size_t channels,
size_t output_width,
const int8_t** input,
@@ -29,9 +30,10 @@
assert(channels != 0);
assert(output_width != 0);
- const long voutput_min_less_zero_point = params->scalar_lrint.output_min_less_zero_point;
- const long voutput_max_less_zero_point = params->scalar_lrint.output_max_less_zero_point;
- const int32_t voutput_zero_point = params->scalar_lrint.output_zero_point;
+ const float voutput_min_less_zero_point = params->scalar_fmagic.output_min_less_zero_point;
+ const float voutput_max_less_zero_point = params->scalar_fmagic.output_max_less_zero_point;
+ const float vmagic_bias = params->scalar_fmagic.magic_bias;
+ const int32_t vmagic_bias_less_output_zero_point = params->scalar_fmagic.magic_bias_less_output_zero_point;
do {
const int8_t* i0 = input[0];
assert(i0 != NULL);
@@ -118,11 +120,11 @@
typedef XNN_UNALIGNED float unaligned_float;
const float vscale = *((const unaligned_float*) w);
w = (const void*) ((const float*) w + 1);
- const float vfpacc = (float) vacc * vscale;
- long vrndacc = lrintf(vfpacc);
- vrndacc = XNN_UNPREDICTABLE(vrndacc < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc;
- vrndacc = XNN_UNPREDICTABLE(vrndacc > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc;
- int32_t vout = (int32_t) vrndacc + voutput_zero_point;
+ float vfpacc = (float) vacc * vscale;
+ vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
+ vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
+ vfpacc += vmagic_bias;
+ int32_t vout = (int32_t) fp32_to_bits(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (int8_t) vout;
} while (--c != 0);
diff --git a/src/qc8-dwconv/gen/up1x9-minmax-fp32-scalar-lrint.c b/src/qc8-dwconv/gen/up1x9-minmax-fp32-scalar-lrintf.c
similarity index 92%
rename from src/qc8-dwconv/gen/up1x9-minmax-fp32-scalar-lrint.c
rename to src/qc8-dwconv/gen/up1x9-minmax-fp32-scalar-lrintf.c
index 9439744..fc3ff13 100644
--- a/src/qc8-dwconv/gen/up1x9-minmax-fp32-scalar-lrint.c
+++ b/src/qc8-dwconv/gen/up1x9-minmax-fp32-scalar-lrintf.c
@@ -14,7 +14,7 @@
#include <xnnpack/math.h>
-void xnn_qc8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrint(
+void xnn_qc8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrintf(
size_t channels,
size_t output_width,
const int8_t** input,
@@ -29,9 +29,9 @@
assert(channels != 0);
assert(output_width != 0);
- const long voutput_min_less_zero_point = params->scalar_lrint.output_min_less_zero_point;
- const long voutput_max_less_zero_point = params->scalar_lrint.output_max_less_zero_point;
- const int32_t voutput_zero_point = params->scalar_lrint.output_zero_point;
+ const float voutput_min_less_zero_point = params->scalar_lrintf.output_min_less_zero_point;
+ const float voutput_max_less_zero_point = params->scalar_lrintf.output_max_less_zero_point;
+ const int32_t voutput_zero_point = params->scalar_lrintf.output_zero_point;
do {
const int8_t* i0 = input[0];
assert(i0 != NULL);
diff --git a/src/qc8-dwconv/gen/up2x25-minmax-fp32-scalar-lrint.c b/src/qc8-dwconv/gen/up2x25-minmax-fp32-scalar-fmagic.c
similarity index 92%
copy from src/qc8-dwconv/gen/up2x25-minmax-fp32-scalar-lrint.c
copy to src/qc8-dwconv/gen/up2x25-minmax-fp32-scalar-fmagic.c
index 161e641..c79fb84 100644
--- a/src/qc8-dwconv/gen/up2x25-minmax-fp32-scalar-lrint.c
+++ b/src/qc8-dwconv/gen/up2x25-minmax-fp32-scalar-fmagic.c
@@ -8,13 +8,14 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-#include <math.h>
+
+#include <fp16.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
-void xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrint(
+void xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_fmagic(
size_t channels,
size_t output_width,
const int8_t** input,
@@ -29,9 +30,10 @@
assert(channels != 0);
assert(output_width != 0);
- const long voutput_min_less_zero_point = params->scalar_lrint.output_min_less_zero_point;
- const long voutput_max_less_zero_point = params->scalar_lrint.output_max_less_zero_point;
- const int32_t voutput_zero_point = params->scalar_lrint.output_zero_point;
+ const float voutput_min_less_zero_point = params->scalar_fmagic.output_min_less_zero_point;
+ const float voutput_max_less_zero_point = params->scalar_fmagic.output_max_less_zero_point;
+ const float vmagic_bias = params->scalar_fmagic.magic_bias;
+ const int32_t vmagic_bias_less_output_zero_point = params->scalar_fmagic.magic_bias_less_output_zero_point;
do {
const int8_t* i0 = input[0];
assert(i0 != NULL);
@@ -430,17 +432,17 @@
vfpacc0 *= vscale0;
vfpacc1 *= vscale1;
- long vrndacc0 = lrintf(vfpacc0);
- long vrndacc1 = lrintf(vfpacc1);
+ vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
+ vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
- vrndacc0 = XNN_UNPREDICTABLE(vrndacc0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0;
- vrndacc1 = XNN_UNPREDICTABLE(vrndacc1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1;
+ vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
+ vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
- vrndacc0 = XNN_UNPREDICTABLE(vrndacc0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0;
- vrndacc1 = XNN_UNPREDICTABLE(vrndacc1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1;
+ vfpacc0 += vmagic_bias;
+ vfpacc1 += vmagic_bias;
- int32_t vout0 = (int32_t) vrndacc0 + voutput_zero_point;
- int32_t vout1 = (int32_t) vrndacc1 + voutput_zero_point;
+ int32_t vout0 = (int32_t) fp32_to_bits(vfpacc0) - vmagic_bias_less_output_zero_point;
+ int32_t vout1 = (int32_t) fp32_to_bits(vfpacc1) - vmagic_bias_less_output_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
@@ -527,11 +529,11 @@
typedef XNN_UNALIGNED float unaligned_float;
const float vscale = *((const unaligned_float*) ((uintptr_t) w + 2 * sizeof(int32_t) + 50 * sizeof(int8_t)));
- const float vfpacc = (float) vacc * vscale;
- long vrndacc = lrintf(vfpacc);
- vrndacc = XNN_UNPREDICTABLE(vrndacc < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc;
- vrndacc = XNN_UNPREDICTABLE(vrndacc > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc;
- int32_t vout = (int32_t) vrndacc + voutput_zero_point;
+ float vfpacc = (float) vacc * vscale;
+ vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
+ vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
+ vfpacc += vmagic_bias;
+ int32_t vout = (int32_t) fp32_to_bits(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (int8_t) vout;
}
diff --git a/src/qc8-dwconv/gen/up2x25-minmax-fp32-scalar-lrint.c b/src/qc8-dwconv/gen/up2x25-minmax-fp32-scalar-lrintf.c
similarity index 93%
rename from src/qc8-dwconv/gen/up2x25-minmax-fp32-scalar-lrint.c
rename to src/qc8-dwconv/gen/up2x25-minmax-fp32-scalar-lrintf.c
index 161e641..f87f8b0 100644
--- a/src/qc8-dwconv/gen/up2x25-minmax-fp32-scalar-lrint.c
+++ b/src/qc8-dwconv/gen/up2x25-minmax-fp32-scalar-lrintf.c
@@ -14,7 +14,7 @@
#include <xnnpack/math.h>
-void xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrint(
+void xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrintf(
size_t channels,
size_t output_width,
const int8_t** input,
@@ -29,9 +29,9 @@
assert(channels != 0);
assert(output_width != 0);
- const long voutput_min_less_zero_point = params->scalar_lrint.output_min_less_zero_point;
- const long voutput_max_less_zero_point = params->scalar_lrint.output_max_less_zero_point;
- const int32_t voutput_zero_point = params->scalar_lrint.output_zero_point;
+ const float voutput_min_less_zero_point = params->scalar_lrintf.output_min_less_zero_point;
+ const float voutput_max_less_zero_point = params->scalar_lrintf.output_max_less_zero_point;
+ const int32_t voutput_zero_point = params->scalar_lrintf.output_zero_point;
do {
const int8_t* i0 = input[0];
assert(i0 != NULL);
@@ -430,14 +430,14 @@
vfpacc0 *= vscale0;
vfpacc1 *= vscale1;
- long vrndacc0 = lrintf(vfpacc0);
- long vrndacc1 = lrintf(vfpacc1);
+ vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
+ vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
- vrndacc0 = XNN_UNPREDICTABLE(vrndacc0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0;
- vrndacc1 = XNN_UNPREDICTABLE(vrndacc1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1;
+ vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
+ vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
- vrndacc0 = XNN_UNPREDICTABLE(vrndacc0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0;
- vrndacc1 = XNN_UNPREDICTABLE(vrndacc1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1;
+ const int32_t vrndacc0 = (int32_t) lrintf(vfpacc0);
+ const int32_t vrndacc1 = (int32_t) lrintf(vfpacc1);
int32_t vout0 = (int32_t) vrndacc0 + voutput_zero_point;
int32_t vout1 = (int32_t) vrndacc1 + voutput_zero_point;
@@ -527,11 +527,11 @@
typedef XNN_UNALIGNED float unaligned_float;
const float vscale = *((const unaligned_float*) ((uintptr_t) w + 2 * sizeof(int32_t) + 50 * sizeof(int8_t)));
- const float vfpacc = (float) vacc * vscale;
- long vrndacc = lrintf(vfpacc);
- vrndacc = XNN_UNPREDICTABLE(vrndacc < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc;
- vrndacc = XNN_UNPREDICTABLE(vrndacc > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc;
- int32_t vout = (int32_t) vrndacc + voutput_zero_point;
+ float vfpacc = (float) vacc * vscale;
+ vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
+ vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
+ const int32_t vrndacc = (int32_t) lrintf(vfpacc);
+ int32_t vout = vrndacc + voutput_zero_point;
*output++ = (int8_t) vout;
}
diff --git a/src/qc8-dwconv/gen/up2x9-minmax-fp32-scalar-lrint.c b/src/qc8-dwconv/gen/up2x9-minmax-fp32-scalar-fmagic.c
similarity index 85%
copy from src/qc8-dwconv/gen/up2x9-minmax-fp32-scalar-lrint.c
copy to src/qc8-dwconv/gen/up2x9-minmax-fp32-scalar-fmagic.c
index 316b9e8..075be34 100644
--- a/src/qc8-dwconv/gen/up2x9-minmax-fp32-scalar-lrint.c
+++ b/src/qc8-dwconv/gen/up2x9-minmax-fp32-scalar-fmagic.c
@@ -8,13 +8,14 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-#include <math.h>
+
+#include <fp16.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
-void xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint(
+void xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic(
size_t channels,
size_t output_width,
const int8_t** input,
@@ -29,9 +30,10 @@
assert(channels != 0);
assert(output_width != 0);
- const long voutput_min_less_zero_point = params->scalar_lrint.output_min_less_zero_point;
- const long voutput_max_less_zero_point = params->scalar_lrint.output_max_less_zero_point;
- const int32_t voutput_zero_point = params->scalar_lrint.output_zero_point;
+ const float voutput_min_less_zero_point = params->scalar_fmagic.output_min_less_zero_point;
+ const float voutput_max_less_zero_point = params->scalar_fmagic.output_max_less_zero_point;
+ const float vmagic_bias = params->scalar_fmagic.magic_bias;
+ const int32_t vmagic_bias_less_output_zero_point = params->scalar_fmagic.magic_bias_less_output_zero_point;
do {
const int8_t* i0 = input[0];
assert(i0 != NULL);
@@ -190,17 +192,17 @@
vfpacc0 *= vscale0;
vfpacc1 *= vscale1;
- long vrndacc0 = lrintf(vfpacc0);
- long vrndacc1 = lrintf(vfpacc1);
+ vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
+ vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
- vrndacc0 = XNN_UNPREDICTABLE(vrndacc0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0;
- vrndacc1 = XNN_UNPREDICTABLE(vrndacc1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1;
+ vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
+ vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
- vrndacc0 = XNN_UNPREDICTABLE(vrndacc0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0;
- vrndacc1 = XNN_UNPREDICTABLE(vrndacc1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1;
+ vfpacc0 += vmagic_bias;
+ vfpacc1 += vmagic_bias;
- int32_t vout0 = (int32_t) vrndacc0 + voutput_zero_point;
- int32_t vout1 = (int32_t) vrndacc1 + voutput_zero_point;
+ int32_t vout0 = (int32_t) fp32_to_bits(vfpacc0) - vmagic_bias_less_output_zero_point;
+ int32_t vout1 = (int32_t) fp32_to_bits(vfpacc1) - vmagic_bias_less_output_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
@@ -239,11 +241,11 @@
typedef XNN_UNALIGNED float unaligned_float;
const float vscale = *((const unaligned_float*) ((uintptr_t) w + 2 * sizeof(int32_t) + 18 * sizeof(int8_t)));
- const float vfpacc = (float) vacc * vscale;
- long vrndacc = lrintf(vfpacc);
- vrndacc = XNN_UNPREDICTABLE(vrndacc < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc;
- vrndacc = XNN_UNPREDICTABLE(vrndacc > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc;
- int32_t vout = (int32_t) vrndacc + voutput_zero_point;
+ float vfpacc = (float) vacc * vscale;
+ vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
+ vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
+ vfpacc += vmagic_bias;
+ int32_t vout = (int32_t) fp32_to_bits(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (int8_t) vout;
}
diff --git a/src/qc8-dwconv/gen/up2x9-minmax-fp32-scalar-lrint.c b/src/qc8-dwconv/gen/up2x9-minmax-fp32-scalar-lrintf.c
similarity index 86%
rename from src/qc8-dwconv/gen/up2x9-minmax-fp32-scalar-lrint.c
rename to src/qc8-dwconv/gen/up2x9-minmax-fp32-scalar-lrintf.c
index 316b9e8..f71e161 100644
--- a/src/qc8-dwconv/gen/up2x9-minmax-fp32-scalar-lrint.c
+++ b/src/qc8-dwconv/gen/up2x9-minmax-fp32-scalar-lrintf.c
@@ -14,7 +14,7 @@
#include <xnnpack/math.h>
-void xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint(
+void xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrintf(
size_t channels,
size_t output_width,
const int8_t** input,
@@ -29,9 +29,9 @@
assert(channels != 0);
assert(output_width != 0);
- const long voutput_min_less_zero_point = params->scalar_lrint.output_min_less_zero_point;
- const long voutput_max_less_zero_point = params->scalar_lrint.output_max_less_zero_point;
- const int32_t voutput_zero_point = params->scalar_lrint.output_zero_point;
+ const float voutput_min_less_zero_point = params->scalar_lrintf.output_min_less_zero_point;
+ const float voutput_max_less_zero_point = params->scalar_lrintf.output_max_less_zero_point;
+ const int32_t voutput_zero_point = params->scalar_lrintf.output_zero_point;
do {
const int8_t* i0 = input[0];
assert(i0 != NULL);
@@ -190,14 +190,14 @@
vfpacc0 *= vscale0;
vfpacc1 *= vscale1;
- long vrndacc0 = lrintf(vfpacc0);
- long vrndacc1 = lrintf(vfpacc1);
+ vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
+ vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
- vrndacc0 = XNN_UNPREDICTABLE(vrndacc0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0;
- vrndacc1 = XNN_UNPREDICTABLE(vrndacc1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1;
+ vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
+ vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
- vrndacc0 = XNN_UNPREDICTABLE(vrndacc0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0;
- vrndacc1 = XNN_UNPREDICTABLE(vrndacc1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1;
+ const int32_t vrndacc0 = (int32_t) lrintf(vfpacc0);
+ const int32_t vrndacc1 = (int32_t) lrintf(vfpacc1);
int32_t vout0 = (int32_t) vrndacc0 + voutput_zero_point;
int32_t vout1 = (int32_t) vrndacc1 + voutput_zero_point;
@@ -239,11 +239,11 @@
typedef XNN_UNALIGNED float unaligned_float;
const float vscale = *((const unaligned_float*) ((uintptr_t) w + 2 * sizeof(int32_t) + 18 * sizeof(int8_t)));
- const float vfpacc = (float) vacc * vscale;
- long vrndacc = lrintf(vfpacc);
- vrndacc = XNN_UNPREDICTABLE(vrndacc < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc;
- vrndacc = XNN_UNPREDICTABLE(vrndacc > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc;
- int32_t vout = (int32_t) vrndacc + voutput_zero_point;
+ float vfpacc = (float) vacc * vscale;
+ vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
+ vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
+ const int32_t vrndacc = (int32_t) lrintf(vfpacc);
+ int32_t vout = vrndacc + voutput_zero_point;
*output++ = (int8_t) vout;
}
diff --git a/src/qc8-dwconv/gen/up4x25-minmax-fp32-scalar-lrint.c b/src/qc8-dwconv/gen/up4x25-minmax-fp32-scalar-fmagic.c
similarity index 92%
copy from src/qc8-dwconv/gen/up4x25-minmax-fp32-scalar-lrint.c
copy to src/qc8-dwconv/gen/up4x25-minmax-fp32-scalar-fmagic.c
index 0e14418..841c93f 100644
--- a/src/qc8-dwconv/gen/up4x25-minmax-fp32-scalar-lrint.c
+++ b/src/qc8-dwconv/gen/up4x25-minmax-fp32-scalar-fmagic.c
@@ -8,13 +8,14 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-#include <math.h>
+
+#include <fp16.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
-void xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrint(
+void xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_fmagic(
size_t channels,
size_t output_width,
const int8_t** input,
@@ -29,9 +30,10 @@
assert(channels != 0);
assert(output_width != 0);
- const long voutput_min_less_zero_point = params->scalar_lrint.output_min_less_zero_point;
- const long voutput_max_less_zero_point = params->scalar_lrint.output_max_less_zero_point;
- const int32_t voutput_zero_point = params->scalar_lrint.output_zero_point;
+ const float voutput_min_less_zero_point = params->scalar_fmagic.output_min_less_zero_point;
+ const float voutput_max_less_zero_point = params->scalar_fmagic.output_max_less_zero_point;
+ const float vmagic_bias = params->scalar_fmagic.magic_bias;
+ const int32_t vmagic_bias_less_output_zero_point = params->scalar_fmagic.magic_bias_less_output_zero_point;
do {
const int8_t* i0 = input[0];
assert(i0 != NULL);
@@ -587,25 +589,25 @@
vfpacc2 *= vscale2;
vfpacc3 *= vscale3;
- long vrndacc0 = lrintf(vfpacc0);
- long vrndacc1 = lrintf(vfpacc1);
- long vrndacc2 = lrintf(vfpacc2);
- long vrndacc3 = lrintf(vfpacc3);
+ vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
+ vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
+ vfpacc2 = math_max_f32(vfpacc2, voutput_min_less_zero_point);
+ vfpacc3 = math_max_f32(vfpacc3, voutput_min_less_zero_point);
- vrndacc0 = XNN_UNPREDICTABLE(vrndacc0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0;
- vrndacc1 = XNN_UNPREDICTABLE(vrndacc1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1;
- vrndacc2 = XNN_UNPREDICTABLE(vrndacc2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2;
- vrndacc3 = XNN_UNPREDICTABLE(vrndacc3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc3;
+ vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
+ vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
+ vfpacc2 = math_min_f32(vfpacc2, voutput_max_less_zero_point);
+ vfpacc3 = math_min_f32(vfpacc3, voutput_max_less_zero_point);
- vrndacc0 = XNN_UNPREDICTABLE(vrndacc0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0;
- vrndacc1 = XNN_UNPREDICTABLE(vrndacc1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1;
- vrndacc2 = XNN_UNPREDICTABLE(vrndacc2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2;
- vrndacc3 = XNN_UNPREDICTABLE(vrndacc3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc3;
+ vfpacc0 += vmagic_bias;
+ vfpacc1 += vmagic_bias;
+ vfpacc2 += vmagic_bias;
+ vfpacc3 += vmagic_bias;
- int32_t vout0 = (int32_t) vrndacc0 + voutput_zero_point;
- int32_t vout1 = (int32_t) vrndacc1 + voutput_zero_point;
- int32_t vout2 = (int32_t) vrndacc2 + voutput_zero_point;
- int32_t vout3 = (int32_t) vrndacc3 + voutput_zero_point;
+ int32_t vout0 = (int32_t) fp32_to_bits(vfpacc0) - vmagic_bias_less_output_zero_point;
+ int32_t vout1 = (int32_t) fp32_to_bits(vfpacc1) - vmagic_bias_less_output_zero_point;
+ int32_t vout2 = (int32_t) fp32_to_bits(vfpacc2) - vmagic_bias_less_output_zero_point;
+ int32_t vout3 = (int32_t) fp32_to_bits(vfpacc3) - vmagic_bias_less_output_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
@@ -697,11 +699,11 @@
k += 1;
const float vscale = *((const float*) ((uintptr_t) w + 3 * sizeof(int32_t) + 100 * sizeof(int8_t)));
- const float vfpacc = (float) vacc * vscale;
- long vrndacc = lrintf(vfpacc);
- vrndacc = XNN_UNPREDICTABLE(vrndacc < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc;
- vrndacc = XNN_UNPREDICTABLE(vrndacc > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc;
- int32_t vout = (int32_t) vrndacc + voutput_zero_point;
+ float vfpacc = (float) vacc * vscale;
+ vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
+ vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
+ vfpacc += vmagic_bias;
+ int32_t vout = (int32_t) fp32_to_bits(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (int8_t) vout;
} while (--c != 0);
diff --git a/src/qc8-dwconv/gen/up4x25-minmax-fp32-scalar-lrint.c b/src/qc8-dwconv/gen/up4x25-minmax-fp32-scalar-lrintf.c
similarity index 93%
rename from src/qc8-dwconv/gen/up4x25-minmax-fp32-scalar-lrint.c
rename to src/qc8-dwconv/gen/up4x25-minmax-fp32-scalar-lrintf.c
index 0e14418..483a760 100644
--- a/src/qc8-dwconv/gen/up4x25-minmax-fp32-scalar-lrint.c
+++ b/src/qc8-dwconv/gen/up4x25-minmax-fp32-scalar-lrintf.c
@@ -14,7 +14,7 @@
#include <xnnpack/math.h>
-void xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrint(
+void xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrintf(
size_t channels,
size_t output_width,
const int8_t** input,
@@ -29,9 +29,9 @@
assert(channels != 0);
assert(output_width != 0);
- const long voutput_min_less_zero_point = params->scalar_lrint.output_min_less_zero_point;
- const long voutput_max_less_zero_point = params->scalar_lrint.output_max_less_zero_point;
- const int32_t voutput_zero_point = params->scalar_lrint.output_zero_point;
+ const float voutput_min_less_zero_point = params->scalar_lrintf.output_min_less_zero_point;
+ const float voutput_max_less_zero_point = params->scalar_lrintf.output_max_less_zero_point;
+ const int32_t voutput_zero_point = params->scalar_lrintf.output_zero_point;
do {
const int8_t* i0 = input[0];
assert(i0 != NULL);
@@ -587,20 +587,20 @@
vfpacc2 *= vscale2;
vfpacc3 *= vscale3;
- long vrndacc0 = lrintf(vfpacc0);
- long vrndacc1 = lrintf(vfpacc1);
- long vrndacc2 = lrintf(vfpacc2);
- long vrndacc3 = lrintf(vfpacc3);
+ vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
+ vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
+ vfpacc2 = math_max_f32(vfpacc2, voutput_min_less_zero_point);
+ vfpacc3 = math_max_f32(vfpacc3, voutput_min_less_zero_point);
- vrndacc0 = XNN_UNPREDICTABLE(vrndacc0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0;
- vrndacc1 = XNN_UNPREDICTABLE(vrndacc1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1;
- vrndacc2 = XNN_UNPREDICTABLE(vrndacc2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2;
- vrndacc3 = XNN_UNPREDICTABLE(vrndacc3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc3;
+ vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
+ vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
+ vfpacc2 = math_min_f32(vfpacc2, voutput_max_less_zero_point);
+ vfpacc3 = math_min_f32(vfpacc3, voutput_max_less_zero_point);
- vrndacc0 = XNN_UNPREDICTABLE(vrndacc0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0;
- vrndacc1 = XNN_UNPREDICTABLE(vrndacc1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1;
- vrndacc2 = XNN_UNPREDICTABLE(vrndacc2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2;
- vrndacc3 = XNN_UNPREDICTABLE(vrndacc3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc3;
+ const int32_t vrndacc0 = (int32_t) lrintf(vfpacc0);
+ const int32_t vrndacc1 = (int32_t) lrintf(vfpacc1);
+ const int32_t vrndacc2 = (int32_t) lrintf(vfpacc2);
+ const int32_t vrndacc3 = (int32_t) lrintf(vfpacc3);
int32_t vout0 = (int32_t) vrndacc0 + voutput_zero_point;
int32_t vout1 = (int32_t) vrndacc1 + voutput_zero_point;
@@ -697,11 +697,11 @@
k += 1;
const float vscale = *((const float*) ((uintptr_t) w + 3 * sizeof(int32_t) + 100 * sizeof(int8_t)));
- const float vfpacc = (float) vacc * vscale;
- long vrndacc = lrintf(vfpacc);
- vrndacc = XNN_UNPREDICTABLE(vrndacc < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc;
- vrndacc = XNN_UNPREDICTABLE(vrndacc > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc;
- int32_t vout = (int32_t) vrndacc + voutput_zero_point;
+ float vfpacc = (float) vacc * vscale;
+ vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
+ vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
+ const int32_t vrndacc = (int32_t) lrintf(vfpacc);
+ int32_t vout = vrndacc + voutput_zero_point;
*output++ = (int8_t) vout;
} while (--c != 0);
diff --git a/src/qc8-dwconv/gen/up4x9-minmax-fp32-scalar-lrint.c b/src/qc8-dwconv/gen/up4x9-minmax-fp32-scalar-fmagic.c
similarity index 84%
copy from src/qc8-dwconv/gen/up4x9-minmax-fp32-scalar-lrint.c
copy to src/qc8-dwconv/gen/up4x9-minmax-fp32-scalar-fmagic.c
index ae94505..68300c2 100644
--- a/src/qc8-dwconv/gen/up4x9-minmax-fp32-scalar-lrint.c
+++ b/src/qc8-dwconv/gen/up4x9-minmax-fp32-scalar-fmagic.c
@@ -8,13 +8,14 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-#include <math.h>
+
+#include <fp16.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
-void xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint(
+void xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_fmagic(
size_t channels,
size_t output_width,
const int8_t** input,
@@ -29,9 +30,10 @@
assert(channels != 0);
assert(output_width != 0);
- const long voutput_min_less_zero_point = params->scalar_lrint.output_min_less_zero_point;
- const long voutput_max_less_zero_point = params->scalar_lrint.output_max_less_zero_point;
- const int32_t voutput_zero_point = params->scalar_lrint.output_zero_point;
+ const float voutput_min_less_zero_point = params->scalar_fmagic.output_min_less_zero_point;
+ const float voutput_max_less_zero_point = params->scalar_fmagic.output_max_less_zero_point;
+ const float vmagic_bias = params->scalar_fmagic.magic_bias;
+ const int32_t vmagic_bias_less_output_zero_point = params->scalar_fmagic.magic_bias_less_output_zero_point;
do {
const int8_t* i0 = input[0];
assert(i0 != NULL);
@@ -251,25 +253,25 @@
vfpacc2 *= vscale2;
vfpacc3 *= vscale3;
- long vrndacc0 = lrintf(vfpacc0);
- long vrndacc1 = lrintf(vfpacc1);
- long vrndacc2 = lrintf(vfpacc2);
- long vrndacc3 = lrintf(vfpacc3);
+ vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
+ vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
+ vfpacc2 = math_max_f32(vfpacc2, voutput_min_less_zero_point);
+ vfpacc3 = math_max_f32(vfpacc3, voutput_min_less_zero_point);
- vrndacc0 = XNN_UNPREDICTABLE(vrndacc0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0;
- vrndacc1 = XNN_UNPREDICTABLE(vrndacc1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1;
- vrndacc2 = XNN_UNPREDICTABLE(vrndacc2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2;
- vrndacc3 = XNN_UNPREDICTABLE(vrndacc3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc3;
+ vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
+ vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
+ vfpacc2 = math_min_f32(vfpacc2, voutput_max_less_zero_point);
+ vfpacc3 = math_min_f32(vfpacc3, voutput_max_less_zero_point);
- vrndacc0 = XNN_UNPREDICTABLE(vrndacc0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0;
- vrndacc1 = XNN_UNPREDICTABLE(vrndacc1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1;
- vrndacc2 = XNN_UNPREDICTABLE(vrndacc2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2;
- vrndacc3 = XNN_UNPREDICTABLE(vrndacc3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc3;
+ vfpacc0 += vmagic_bias;
+ vfpacc1 += vmagic_bias;
+ vfpacc2 += vmagic_bias;
+ vfpacc3 += vmagic_bias;
- int32_t vout0 = (int32_t) vrndacc0 + voutput_zero_point;
- int32_t vout1 = (int32_t) vrndacc1 + voutput_zero_point;
- int32_t vout2 = (int32_t) vrndacc2 + voutput_zero_point;
- int32_t vout3 = (int32_t) vrndacc3 + voutput_zero_point;
+ int32_t vout0 = (int32_t) fp32_to_bits(vfpacc0) - vmagic_bias_less_output_zero_point;
+ int32_t vout1 = (int32_t) fp32_to_bits(vfpacc1) - vmagic_bias_less_output_zero_point;
+ int32_t vout2 = (int32_t) fp32_to_bits(vfpacc2) - vmagic_bias_less_output_zero_point;
+ int32_t vout3 = (int32_t) fp32_to_bits(vfpacc3) - vmagic_bias_less_output_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
@@ -313,11 +315,11 @@
k += 1;
const float vscale = *((const float*) ((uintptr_t) w + 3 * sizeof(int32_t) + 36 * sizeof(int8_t)));
- const float vfpacc = (float) vacc * vscale;
- long vrndacc = lrintf(vfpacc);
- vrndacc = XNN_UNPREDICTABLE(vrndacc < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc;
- vrndacc = XNN_UNPREDICTABLE(vrndacc > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc;
- int32_t vout = (int32_t) vrndacc + voutput_zero_point;
+ float vfpacc = (float) vacc * vscale;
+ vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
+ vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
+ vfpacc += vmagic_bias;
+ int32_t vout = (int32_t) fp32_to_bits(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (int8_t) vout;
} while (--c != 0);
diff --git a/src/qc8-dwconv/gen/up4x9-minmax-fp32-scalar-lrint.c b/src/qc8-dwconv/gen/up4x9-minmax-fp32-scalar-lrintf.c
similarity index 86%
rename from src/qc8-dwconv/gen/up4x9-minmax-fp32-scalar-lrint.c
rename to src/qc8-dwconv/gen/up4x9-minmax-fp32-scalar-lrintf.c
index ae94505..a1b3366 100644
--- a/src/qc8-dwconv/gen/up4x9-minmax-fp32-scalar-lrint.c
+++ b/src/qc8-dwconv/gen/up4x9-minmax-fp32-scalar-lrintf.c
@@ -14,7 +14,7 @@
#include <xnnpack/math.h>
-void xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint(
+void xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrintf(
size_t channels,
size_t output_width,
const int8_t** input,
@@ -29,9 +29,9 @@
assert(channels != 0);
assert(output_width != 0);
- const long voutput_min_less_zero_point = params->scalar_lrint.output_min_less_zero_point;
- const long voutput_max_less_zero_point = params->scalar_lrint.output_max_less_zero_point;
- const int32_t voutput_zero_point = params->scalar_lrint.output_zero_point;
+ const float voutput_min_less_zero_point = params->scalar_lrintf.output_min_less_zero_point;
+ const float voutput_max_less_zero_point = params->scalar_lrintf.output_max_less_zero_point;
+ const int32_t voutput_zero_point = params->scalar_lrintf.output_zero_point;
do {
const int8_t* i0 = input[0];
assert(i0 != NULL);
@@ -251,20 +251,20 @@
vfpacc2 *= vscale2;
vfpacc3 *= vscale3;
- long vrndacc0 = lrintf(vfpacc0);
- long vrndacc1 = lrintf(vfpacc1);
- long vrndacc2 = lrintf(vfpacc2);
- long vrndacc3 = lrintf(vfpacc3);
+ vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
+ vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
+ vfpacc2 = math_max_f32(vfpacc2, voutput_min_less_zero_point);
+ vfpacc3 = math_max_f32(vfpacc3, voutput_min_less_zero_point);
- vrndacc0 = XNN_UNPREDICTABLE(vrndacc0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0;
- vrndacc1 = XNN_UNPREDICTABLE(vrndacc1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1;
- vrndacc2 = XNN_UNPREDICTABLE(vrndacc2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2;
- vrndacc3 = XNN_UNPREDICTABLE(vrndacc3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc3;
+ vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
+ vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
+ vfpacc2 = math_min_f32(vfpacc2, voutput_max_less_zero_point);
+ vfpacc3 = math_min_f32(vfpacc3, voutput_max_less_zero_point);
- vrndacc0 = XNN_UNPREDICTABLE(vrndacc0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0;
- vrndacc1 = XNN_UNPREDICTABLE(vrndacc1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1;
- vrndacc2 = XNN_UNPREDICTABLE(vrndacc2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2;
- vrndacc3 = XNN_UNPREDICTABLE(vrndacc3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc3;
+ const int32_t vrndacc0 = (int32_t) lrintf(vfpacc0);
+ const int32_t vrndacc1 = (int32_t) lrintf(vfpacc1);
+ const int32_t vrndacc2 = (int32_t) lrintf(vfpacc2);
+ const int32_t vrndacc3 = (int32_t) lrintf(vfpacc3);
int32_t vout0 = (int32_t) vrndacc0 + voutput_zero_point;
int32_t vout1 = (int32_t) vrndacc1 + voutput_zero_point;
@@ -313,11 +313,11 @@
k += 1;
const float vscale = *((const float*) ((uintptr_t) w + 3 * sizeof(int32_t) + 36 * sizeof(int8_t)));
- const float vfpacc = (float) vacc * vscale;
- long vrndacc = lrintf(vfpacc);
- vrndacc = XNN_UNPREDICTABLE(vrndacc < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc;
- vrndacc = XNN_UNPREDICTABLE(vrndacc > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc;
- int32_t vout = (int32_t) vrndacc + voutput_zero_point;
+ float vfpacc = (float) vacc * vscale;
+ vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
+ vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
+ const int32_t vrndacc = (int32_t) lrintf(vfpacc);
+ int32_t vout = vrndacc + voutput_zero_point;
*output++ = (int8_t) vout;
} while (--c != 0);
diff --git a/src/qc8-gemm/gen/1x2-minmax-fp32-scalar-magic.c b/src/qc8-gemm/gen/1x2-minmax-fp32-scalar-fmagic.c
similarity index 87%
rename from src/qc8-gemm/gen/1x2-minmax-fp32-scalar-magic.c
rename to src/qc8-gemm/gen/1x2-minmax-fp32-scalar-fmagic.c
index 922e597..107b1d6 100644
--- a/src/qc8-gemm/gen/1x2-minmax-fp32-scalar-magic.c
+++ b/src/qc8-gemm/gen/1x2-minmax-fp32-scalar-fmagic.c
@@ -15,7 +15,7 @@
#include <xnnpack/gemm.h>
-void xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_magic(
+void xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic(
size_t mr,
size_t nc,
size_t kc,
@@ -64,19 +64,19 @@
vfpacc0x1 *= vscale1;
w = (const void*) ((const float*) w + 2);
- const float voutput_min_less_zero_point = params->scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
- const float vmagic_bias = params->scalar_magic.magic_bias;
+ const float vmagic_bias = params->scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
- const int32_t vmagic_bias_less_output_zero_point = params->scalar_magic.magic_bias_less_output_zero_point;
+ const int32_t vmagic_bias_less_output_zero_point = params->scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
diff --git a/src/qc8-gemm/gen/1x2-minmax-fp32-scalar-lrint.c b/src/qc8-gemm/gen/1x2-minmax-fp32-scalar-lrint.c
deleted file mode 100644
index d363141..0000000
--- a/src/qc8-gemm/gen/1x2-minmax-fp32-scalar-lrint.c
+++ /dev/null
@@ -1,98 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-gemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-#include <math.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint(
- size_t mr,
- size_t nc,
- size_t kc,
- const int8_t* restrict a,
- size_t a_stride,
- const void* restrict w,
- int8_t* restrict c,
- size_t cm_stride,
- size_t cn_stride,
- const union xnn_qs8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 1);
- assert(nc != 0);
- assert(kc != 0);
-
- const int8_t* a0 = a;
- int8_t* c0 = c;
-
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- w = (const void*) ((const int32_t*) w + 2);
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) *a0++;
-
- const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
- const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
- w = (const void*) ((const int8_t*) w + 2);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
-
- k -= sizeof(int8_t);
- } while (k != 0);
-
- float vfpacc0x0 = (float) vacc0x0;
- float vfpacc0x1 = (float) vacc0x1;
-
- typedef XNN_UNALIGNED float unaligned_float;
- const float vscale0 = ((const unaligned_float*) w)[0];
- vfpacc0x0 *= vscale0;
- const float vscale1 = ((const unaligned_float*) w)[1];
- vfpacc0x1 *= vscale1;
- w = (const void*) ((const float*) w + 2);
-
- long vrndacc0x0 = lrintf(vfpacc0x0);
- long vrndacc0x1 = lrintf(vfpacc0x1);
-
- const long voutput_min_less_zero_point = params->scalar_lrint.output_min_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x1;
-
- const long voutput_max_less_zero_point = params->scalar_lrint.output_max_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x1;
-
- const int32_t voutput_zero_point = params->scalar_lrint.output_zero_point;
- int32_t vout0x0 = (int32_t) vrndacc0x0 + voutput_zero_point;
- int32_t vout0x1 = (int32_t) vrndacc0x1 + voutput_zero_point;
-
- if XNN_LIKELY(nc >= 2) {
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
-
- a0 = (const int8_t*) ((uintptr_t) a0 - kc);
-
- c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
-
- nc -= 2;
- } else {
- if (nc & 1) {
- c0[0] = (int8_t) vout0x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qc8-gemm/gen/1x2-minmax-fp32-scalar-magic.c b/src/qc8-gemm/gen/1x2-minmax-fp32-scalar-lrintf.c
similarity index 75%
copy from src/qc8-gemm/gen/1x2-minmax-fp32-scalar-magic.c
copy to src/qc8-gemm/gen/1x2-minmax-fp32-scalar-lrintf.c
index 922e597..fbf875e 100644
--- a/src/qc8-gemm/gen/1x2-minmax-fp32-scalar-magic.c
+++ b/src/qc8-gemm/gen/1x2-minmax-fp32-scalar-lrintf.c
@@ -8,14 +8,13 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-
-#include <fp16.h>
+#include <math.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
-void xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_magic(
+void xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
@@ -64,21 +63,20 @@
vfpacc0x1 *= vscale1;
w = (const void*) ((const float*) w + 2);
- const float voutput_min_less_zero_point = params->scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->scalar_lrintf.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->scalar_lrintf.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
- const float vmagic_bias = params->scalar_magic.magic_bias;
- vfpacc0x0 += vmagic_bias;
- vfpacc0x1 += vmagic_bias;
+ const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
+ const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
- const int32_t vmagic_bias_less_output_zero_point = params->scalar_magic.magic_bias_less_output_zero_point;
- int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
- int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
+ const int32_t voutput_zero_point = params->scalar_lrintf.output_zero_point;
+ int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
+ int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
if XNN_LIKELY(nc >= 2) {
c0[0] = (int8_t) vout0x0;
diff --git a/src/qc8-gemm/gen/1x4-minmax-fp32-scalar-magic.c b/src/qc8-gemm/gen/1x4-minmax-fp32-scalar-fmagic.c
similarity index 90%
rename from src/qc8-gemm/gen/1x4-minmax-fp32-scalar-magic.c
rename to src/qc8-gemm/gen/1x4-minmax-fp32-scalar-fmagic.c
index 1c92510..afb0637 100644
--- a/src/qc8-gemm/gen/1x4-minmax-fp32-scalar-magic.c
+++ b/src/qc8-gemm/gen/1x4-minmax-fp32-scalar-fmagic.c
@@ -15,7 +15,7 @@
#include <xnnpack/gemm.h>
-void xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_magic(
+void xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic(
size_t mr,
size_t nc,
size_t kc,
@@ -75,25 +75,25 @@
vfpacc0x3 *= vscale3;
w = (const void*) ((const float*) w + 4);
- const float voutput_min_less_zero_point = params->scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
vfpacc0x3 = math_max_f32(vfpacc0x3, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
vfpacc0x3 = math_min_f32(vfpacc0x3, voutput_max_less_zero_point);
- const float vmagic_bias = params->scalar_magic.magic_bias;
+ const float vmagic_bias = params->scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc0x2 += vmagic_bias;
vfpacc0x3 += vmagic_bias;
- const int32_t vmagic_bias_less_output_zero_point = params->scalar_magic.magic_bias_less_output_zero_point;
+ const int32_t vmagic_bias_less_output_zero_point = params->scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
int32_t vout0x2 = (int32_t) fp32_to_bits(vfpacc0x2) - vmagic_bias_less_output_zero_point;
diff --git a/src/qc8-gemm/gen/1x4-minmax-fp32-scalar-lrint.c b/src/qc8-gemm/gen/1x4-minmax-fp32-scalar-lrint.c
deleted file mode 100644
index 7c16fa6..0000000
--- a/src/qc8-gemm/gen/1x4-minmax-fp32-scalar-lrint.c
+++ /dev/null
@@ -1,125 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-gemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-#include <math.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint(
- size_t mr,
- size_t nc,
- size_t kc,
- const int8_t* restrict a,
- size_t a_stride,
- const void* restrict w,
- int8_t* restrict c,
- size_t cm_stride,
- size_t cn_stride,
- const union xnn_qs8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 1);
- assert(nc != 0);
- assert(kc != 0);
-
- const int8_t* a0 = a;
- int8_t* c0 = c;
-
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- int32_t vacc0x2 = ((const int32_t*) w)[2];
- int32_t vacc0x3 = ((const int32_t*) w)[3];
- w = (const void*) ((const int32_t*) w + 4);
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) *a0++;
-
- const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
- const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
- const int32_t vb2 = (int32_t) ((const int8_t*) w)[2];
- const int32_t vb3 = (int32_t) ((const int8_t*) w)[3];
- w = (const void*) ((const int8_t*) w + 4);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
- vacc0x2 += va0 * vb2;
- vacc0x3 += va0 * vb3;
-
- k -= sizeof(int8_t);
- } while (k != 0);
-
- float vfpacc0x0 = (float) vacc0x0;
- float vfpacc0x1 = (float) vacc0x1;
- float vfpacc0x2 = (float) vacc0x2;
- float vfpacc0x3 = (float) vacc0x3;
-
- const float vscale0 = ((const float*) w)[0];
- vfpacc0x0 *= vscale0;
- const float vscale1 = ((const float*) w)[1];
- vfpacc0x1 *= vscale1;
- const float vscale2 = ((const float*) w)[2];
- vfpacc0x2 *= vscale2;
- const float vscale3 = ((const float*) w)[3];
- vfpacc0x3 *= vscale3;
- w = (const void*) ((const float*) w + 4);
-
- long vrndacc0x0 = lrintf(vfpacc0x0);
- long vrndacc0x1 = lrintf(vfpacc0x1);
- long vrndacc0x2 = lrintf(vfpacc0x2);
- long vrndacc0x3 = lrintf(vfpacc0x3);
-
- const long voutput_min_less_zero_point = params->scalar_lrint.output_min_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x1;
- vrndacc0x2 = XNN_UNPREDICTABLE(vrndacc0x2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x2;
- vrndacc0x3 = XNN_UNPREDICTABLE(vrndacc0x3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x3;
-
- const long voutput_max_less_zero_point = params->scalar_lrint.output_max_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x1;
- vrndacc0x2 = XNN_UNPREDICTABLE(vrndacc0x2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x2;
- vrndacc0x3 = XNN_UNPREDICTABLE(vrndacc0x3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x3;
-
- const int32_t voutput_zero_point = params->scalar_lrint.output_zero_point;
- int32_t vout0x0 = (int32_t) vrndacc0x0 + voutput_zero_point;
- int32_t vout0x1 = (int32_t) vrndacc0x1 + voutput_zero_point;
- int32_t vout0x2 = (int32_t) vrndacc0x2 + voutput_zero_point;
- int32_t vout0x3 = (int32_t) vrndacc0x3 + voutput_zero_point;
-
- if XNN_LIKELY(nc >= 4) {
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
- c0[2] = (int8_t) vout0x2;
- c0[3] = (int8_t) vout0x3;
-
- a0 = (const int8_t*) ((uintptr_t) a0 - kc);
-
- c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
-
- nc -= 4;
- } else {
- if (nc & 2) {
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
- vout0x0 = vout0x2;
- c0 += 2;
- }
- if (nc & 1) {
- c0[0] = (int8_t) vout0x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qc8-gemm/gen/1x4-minmax-fp32-scalar-magic.c b/src/qc8-gemm/gen/1x4-minmax-fp32-scalar-lrintf.c
similarity index 76%
copy from src/qc8-gemm/gen/1x4-minmax-fp32-scalar-magic.c
copy to src/qc8-gemm/gen/1x4-minmax-fp32-scalar-lrintf.c
index 1c92510..e367650 100644
--- a/src/qc8-gemm/gen/1x4-minmax-fp32-scalar-magic.c
+++ b/src/qc8-gemm/gen/1x4-minmax-fp32-scalar-lrintf.c
@@ -8,14 +8,13 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-
-#include <fp16.h>
+#include <math.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
-void xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_magic(
+void xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
@@ -75,29 +74,28 @@
vfpacc0x3 *= vscale3;
w = (const void*) ((const float*) w + 4);
- const float voutput_min_less_zero_point = params->scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->scalar_lrintf.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
vfpacc0x3 = math_max_f32(vfpacc0x3, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->scalar_lrintf.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
vfpacc0x3 = math_min_f32(vfpacc0x3, voutput_max_less_zero_point);
- const float vmagic_bias = params->scalar_magic.magic_bias;
- vfpacc0x0 += vmagic_bias;
- vfpacc0x1 += vmagic_bias;
- vfpacc0x2 += vmagic_bias;
- vfpacc0x3 += vmagic_bias;
+ const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
+ const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
+ const int32_t vrndacc0x2 = (int32_t) lrintf(vfpacc0x2);
+ const int32_t vrndacc0x3 = (int32_t) lrintf(vfpacc0x3);
- const int32_t vmagic_bias_less_output_zero_point = params->scalar_magic.magic_bias_less_output_zero_point;
- int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
- int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
- int32_t vout0x2 = (int32_t) fp32_to_bits(vfpacc0x2) - vmagic_bias_less_output_zero_point;
- int32_t vout0x3 = (int32_t) fp32_to_bits(vfpacc0x3) - vmagic_bias_less_output_zero_point;
+ const int32_t voutput_zero_point = params->scalar_lrintf.output_zero_point;
+ int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
+ int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
+ int32_t vout0x2 = vrndacc0x2 + voutput_zero_point;
+ int32_t vout0x3 = vrndacc0x3 + voutput_zero_point;
if XNN_LIKELY(nc >= 4) {
c0[0] = (int8_t) vout0x0;
diff --git a/src/qc8-gemm/gen/2x2-minmax-fp32-scalar-magic.c b/src/qc8-gemm/gen/2x2-minmax-fp32-scalar-fmagic.c
similarity index 90%
rename from src/qc8-gemm/gen/2x2-minmax-fp32-scalar-magic.c
rename to src/qc8-gemm/gen/2x2-minmax-fp32-scalar-fmagic.c
index c2e4de9..f262791 100644
--- a/src/qc8-gemm/gen/2x2-minmax-fp32-scalar-magic.c
+++ b/src/qc8-gemm/gen/2x2-minmax-fp32-scalar-fmagic.c
@@ -15,7 +15,7 @@
#include <xnnpack/gemm.h>
-void xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_magic(
+void xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic(
size_t mr,
size_t nc,
size_t kc,
@@ -79,25 +79,25 @@
vfpacc1x1 *= vscale1;
w = (const void*) ((const float*) w + 2);
- const float voutput_min_less_zero_point = params->scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point);
vfpacc1x1 = math_max_f32(vfpacc1x1, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point);
vfpacc1x1 = math_min_f32(vfpacc1x1, voutput_max_less_zero_point);
- const float vmagic_bias = params->scalar_magic.magic_bias;
+ const float vmagic_bias = params->scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc1x0 += vmagic_bias;
vfpacc1x1 += vmagic_bias;
- const int32_t vmagic_bias_less_output_zero_point = params->scalar_magic.magic_bias_less_output_zero_point;
+ const int32_t vmagic_bias_less_output_zero_point = params->scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
int32_t vout1x0 = (int32_t) fp32_to_bits(vfpacc1x0) - vmagic_bias_less_output_zero_point;
diff --git a/src/qc8-gemm/gen/2x2-minmax-fp32-scalar-lrint.c b/src/qc8-gemm/gen/2x2-minmax-fp32-scalar-lrint.c
deleted file mode 100644
index 87007ef..0000000
--- a/src/qc8-gemm/gen/2x2-minmax-fp32-scalar-lrint.c
+++ /dev/null
@@ -1,126 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-gemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-#include <math.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint(
- size_t mr,
- size_t nc,
- size_t kc,
- const int8_t* restrict a,
- size_t a_stride,
- const void* restrict w,
- int8_t* restrict c,
- size_t cm_stride,
- size_t cn_stride,
- const union xnn_qs8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 2);
- assert(nc != 0);
- assert(kc != 0);
-
- const int8_t* a0 = a;
- int8_t* c0 = c;
- const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
- int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
- if XNN_UNPREDICTABLE(mr != 2) {
- a1 = a0;
- c1 = c0;
- }
-
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- int32_t vacc1x0 = vacc0x0;
- int32_t vacc1x1 = vacc0x1;
- w = (const void*) ((const int32_t*) w + 2);
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) *a0++;
- const int32_t va1 = (int32_t) *a1++;
-
- const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
- const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
- w = (const void*) ((const int8_t*) w + 2);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
- vacc1x0 += va1 * vb0;
- vacc1x1 += va1 * vb1;
-
- k -= sizeof(int8_t);
- } while (k != 0);
-
- float vfpacc0x0 = (float) vacc0x0;
- float vfpacc0x1 = (float) vacc0x1;
- float vfpacc1x0 = (float) vacc1x0;
- float vfpacc1x1 = (float) vacc1x1;
-
- typedef XNN_UNALIGNED float unaligned_float;
- const float vscale0 = ((const unaligned_float*) w)[0];
- vfpacc0x0 *= vscale0;
- vfpacc1x0 *= vscale0;
- const float vscale1 = ((const unaligned_float*) w)[1];
- vfpacc0x1 *= vscale1;
- vfpacc1x1 *= vscale1;
- w = (const void*) ((const float*) w + 2);
-
- long vrndacc0x0 = lrintf(vfpacc0x0);
- long vrndacc0x1 = lrintf(vfpacc0x1);
- long vrndacc1x0 = lrintf(vfpacc1x0);
- long vrndacc1x1 = lrintf(vfpacc1x1);
-
- const long voutput_min_less_zero_point = params->scalar_lrint.output_min_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x1;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x1;
-
- const long voutput_max_less_zero_point = params->scalar_lrint.output_max_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x1;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x1;
-
- const int32_t voutput_zero_point = params->scalar_lrint.output_zero_point;
- int32_t vout0x0 = (int32_t) vrndacc0x0 + voutput_zero_point;
- int32_t vout0x1 = (int32_t) vrndacc0x1 + voutput_zero_point;
- int32_t vout1x0 = (int32_t) vrndacc1x0 + voutput_zero_point;
- int32_t vout1x1 = (int32_t) vrndacc1x1 + voutput_zero_point;
-
- if XNN_LIKELY(nc >= 2) {
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
- c1[0] = (int8_t) vout1x0;
- c1[1] = (int8_t) vout1x1;
-
- a0 = (const int8_t*) ((uintptr_t) a0 - kc);
- a1 = (const int8_t*) ((uintptr_t) a1 - kc);
-
- c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
- c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
-
- nc -= 2;
- } else {
- if (nc & 1) {
- c0[0] = (int8_t) vout0x0;
- c1[0] = (int8_t) vout1x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qc8-gemm/gen/2x2-minmax-fp32-scalar-magic.c b/src/qc8-gemm/gen/2x2-minmax-fp32-scalar-lrintf.c
similarity index 76%
copy from src/qc8-gemm/gen/2x2-minmax-fp32-scalar-magic.c
copy to src/qc8-gemm/gen/2x2-minmax-fp32-scalar-lrintf.c
index c2e4de9..4cbd2b4 100644
--- a/src/qc8-gemm/gen/2x2-minmax-fp32-scalar-magic.c
+++ b/src/qc8-gemm/gen/2x2-minmax-fp32-scalar-lrintf.c
@@ -8,14 +8,13 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-
-#include <fp16.h>
+#include <math.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
-void xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_magic(
+void xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
@@ -79,29 +78,28 @@
vfpacc1x1 *= vscale1;
w = (const void*) ((const float*) w + 2);
- const float voutput_min_less_zero_point = params->scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->scalar_lrintf.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point);
vfpacc1x1 = math_max_f32(vfpacc1x1, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->scalar_lrintf.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point);
vfpacc1x1 = math_min_f32(vfpacc1x1, voutput_max_less_zero_point);
- const float vmagic_bias = params->scalar_magic.magic_bias;
- vfpacc0x0 += vmagic_bias;
- vfpacc0x1 += vmagic_bias;
- vfpacc1x0 += vmagic_bias;
- vfpacc1x1 += vmagic_bias;
+ const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
+ const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
+ const int32_t vrndacc1x0 = (int32_t) lrintf(vfpacc1x0);
+ const int32_t vrndacc1x1 = (int32_t) lrintf(vfpacc1x1);
- const int32_t vmagic_bias_less_output_zero_point = params->scalar_magic.magic_bias_less_output_zero_point;
- int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
- int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
- int32_t vout1x0 = (int32_t) fp32_to_bits(vfpacc1x0) - vmagic_bias_less_output_zero_point;
- int32_t vout1x1 = (int32_t) fp32_to_bits(vfpacc1x1) - vmagic_bias_less_output_zero_point;
+ const int32_t voutput_zero_point = params->scalar_lrintf.output_zero_point;
+ int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
+ int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
+ int32_t vout1x0 = vrndacc1x0 + voutput_zero_point;
+ int32_t vout1x1 = vrndacc1x1 + voutput_zero_point;
if XNN_LIKELY(nc >= 2) {
c0[0] = (int8_t) vout0x0;
diff --git a/src/qc8-gemm/gen/2x4-minmax-fp32-scalar-magic.c b/src/qc8-gemm/gen/2x4-minmax-fp32-scalar-fmagic.c
similarity index 93%
rename from src/qc8-gemm/gen/2x4-minmax-fp32-scalar-magic.c
rename to src/qc8-gemm/gen/2x4-minmax-fp32-scalar-fmagic.c
index 2866961..4ea0025 100644
--- a/src/qc8-gemm/gen/2x4-minmax-fp32-scalar-magic.c
+++ b/src/qc8-gemm/gen/2x4-minmax-fp32-scalar-fmagic.c
@@ -15,7 +15,7 @@
#include <xnnpack/gemm.h>
-void xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_magic(
+void xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic(
size_t mr,
size_t nc,
size_t kc,
@@ -98,7 +98,7 @@
vfpacc1x3 *= vscale3;
w = (const void*) ((const float*) w + 4);
- const float voutput_min_less_zero_point = params->scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
@@ -108,7 +108,7 @@
vfpacc1x2 = math_max_f32(vfpacc1x2, voutput_min_less_zero_point);
vfpacc1x3 = math_max_f32(vfpacc1x3, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
@@ -118,7 +118,7 @@
vfpacc1x2 = math_min_f32(vfpacc1x2, voutput_max_less_zero_point);
vfpacc1x3 = math_min_f32(vfpacc1x3, voutput_max_less_zero_point);
- const float vmagic_bias = params->scalar_magic.magic_bias;
+ const float vmagic_bias = params->scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc0x2 += vmagic_bias;
@@ -128,7 +128,7 @@
vfpacc1x2 += vmagic_bias;
vfpacc1x3 += vmagic_bias;
- const int32_t vmagic_bias_less_output_zero_point = params->scalar_magic.magic_bias_less_output_zero_point;
+ const int32_t vmagic_bias_less_output_zero_point = params->scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
int32_t vout0x2 = (int32_t) fp32_to_bits(vfpacc0x2) - vmagic_bias_less_output_zero_point;
diff --git a/src/qc8-gemm/gen/2x4-minmax-fp32-scalar-lrint.c b/src/qc8-gemm/gen/2x4-minmax-fp32-scalar-lrint.c
deleted file mode 100644
index 347274c..0000000
--- a/src/qc8-gemm/gen/2x4-minmax-fp32-scalar-lrint.c
+++ /dev/null
@@ -1,175 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-gemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-#include <math.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint(
- size_t mr,
- size_t nc,
- size_t kc,
- const int8_t* restrict a,
- size_t a_stride,
- const void* restrict w,
- int8_t* restrict c,
- size_t cm_stride,
- size_t cn_stride,
- const union xnn_qs8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 2);
- assert(nc != 0);
- assert(kc != 0);
-
- const int8_t* a0 = a;
- int8_t* c0 = c;
- const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
- int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
- if XNN_UNPREDICTABLE(mr != 2) {
- a1 = a0;
- c1 = c0;
- }
-
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- int32_t vacc0x2 = ((const int32_t*) w)[2];
- int32_t vacc0x3 = ((const int32_t*) w)[3];
- int32_t vacc1x0 = vacc0x0;
- int32_t vacc1x1 = vacc0x1;
- int32_t vacc1x2 = vacc0x2;
- int32_t vacc1x3 = vacc0x3;
- w = (const void*) ((const int32_t*) w + 4);
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) *a0++;
- const int32_t va1 = (int32_t) *a1++;
-
- const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
- const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
- const int32_t vb2 = (int32_t) ((const int8_t*) w)[2];
- const int32_t vb3 = (int32_t) ((const int8_t*) w)[3];
- w = (const void*) ((const int8_t*) w + 4);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
- vacc0x2 += va0 * vb2;
- vacc0x3 += va0 * vb3;
- vacc1x0 += va1 * vb0;
- vacc1x1 += va1 * vb1;
- vacc1x2 += va1 * vb2;
- vacc1x3 += va1 * vb3;
-
- k -= sizeof(int8_t);
- } while (k != 0);
-
- float vfpacc0x0 = (float) vacc0x0;
- float vfpacc0x1 = (float) vacc0x1;
- float vfpacc0x2 = (float) vacc0x2;
- float vfpacc0x3 = (float) vacc0x3;
- float vfpacc1x0 = (float) vacc1x0;
- float vfpacc1x1 = (float) vacc1x1;
- float vfpacc1x2 = (float) vacc1x2;
- float vfpacc1x3 = (float) vacc1x3;
-
- const float vscale0 = ((const float*) w)[0];
- vfpacc0x0 *= vscale0;
- vfpacc1x0 *= vscale0;
- const float vscale1 = ((const float*) w)[1];
- vfpacc0x1 *= vscale1;
- vfpacc1x1 *= vscale1;
- const float vscale2 = ((const float*) w)[2];
- vfpacc0x2 *= vscale2;
- vfpacc1x2 *= vscale2;
- const float vscale3 = ((const float*) w)[3];
- vfpacc0x3 *= vscale3;
- vfpacc1x3 *= vscale3;
- w = (const void*) ((const float*) w + 4);
-
- long vrndacc0x0 = lrintf(vfpacc0x0);
- long vrndacc0x1 = lrintf(vfpacc0x1);
- long vrndacc0x2 = lrintf(vfpacc0x2);
- long vrndacc0x3 = lrintf(vfpacc0x3);
- long vrndacc1x0 = lrintf(vfpacc1x0);
- long vrndacc1x1 = lrintf(vfpacc1x1);
- long vrndacc1x2 = lrintf(vfpacc1x2);
- long vrndacc1x3 = lrintf(vfpacc1x3);
-
- const long voutput_min_less_zero_point = params->scalar_lrint.output_min_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x1;
- vrndacc0x2 = XNN_UNPREDICTABLE(vrndacc0x2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x2;
- vrndacc0x3 = XNN_UNPREDICTABLE(vrndacc0x3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x3;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x1;
- vrndacc1x2 = XNN_UNPREDICTABLE(vrndacc1x2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x2;
- vrndacc1x3 = XNN_UNPREDICTABLE(vrndacc1x3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x3;
-
- const long voutput_max_less_zero_point = params->scalar_lrint.output_max_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x1;
- vrndacc0x2 = XNN_UNPREDICTABLE(vrndacc0x2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x2;
- vrndacc0x3 = XNN_UNPREDICTABLE(vrndacc0x3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x3;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x1;
- vrndacc1x2 = XNN_UNPREDICTABLE(vrndacc1x2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x2;
- vrndacc1x3 = XNN_UNPREDICTABLE(vrndacc1x3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x3;
-
- const int32_t voutput_zero_point = params->scalar_lrint.output_zero_point;
- int32_t vout0x0 = (int32_t) vrndacc0x0 + voutput_zero_point;
- int32_t vout0x1 = (int32_t) vrndacc0x1 + voutput_zero_point;
- int32_t vout0x2 = (int32_t) vrndacc0x2 + voutput_zero_point;
- int32_t vout0x3 = (int32_t) vrndacc0x3 + voutput_zero_point;
- int32_t vout1x0 = (int32_t) vrndacc1x0 + voutput_zero_point;
- int32_t vout1x1 = (int32_t) vrndacc1x1 + voutput_zero_point;
- int32_t vout1x2 = (int32_t) vrndacc1x2 + voutput_zero_point;
- int32_t vout1x3 = (int32_t) vrndacc1x3 + voutput_zero_point;
-
- if XNN_LIKELY(nc >= 4) {
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
- c0[2] = (int8_t) vout0x2;
- c0[3] = (int8_t) vout0x3;
- c1[0] = (int8_t) vout1x0;
- c1[1] = (int8_t) vout1x1;
- c1[2] = (int8_t) vout1x2;
- c1[3] = (int8_t) vout1x3;
-
- a0 = (const int8_t*) ((uintptr_t) a0 - kc);
- a1 = (const int8_t*) ((uintptr_t) a1 - kc);
-
- c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
- c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
-
- nc -= 4;
- } else {
- if (nc & 2) {
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
- vout0x0 = vout0x2;
- c0 += 2;
- c1[0] = (int8_t) vout1x0;
- c1[1] = (int8_t) vout1x1;
- vout1x0 = vout1x2;
- c1 += 2;
- }
- if (nc & 1) {
- c0[0] = (int8_t) vout0x0;
- c1[0] = (int8_t) vout1x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qc8-gemm/gen/2x4-minmax-fp32-scalar-magic.c b/src/qc8-gemm/gen/2x4-minmax-fp32-scalar-lrintf.c
similarity index 76%
copy from src/qc8-gemm/gen/2x4-minmax-fp32-scalar-magic.c
copy to src/qc8-gemm/gen/2x4-minmax-fp32-scalar-lrintf.c
index 2866961..f10df04 100644
--- a/src/qc8-gemm/gen/2x4-minmax-fp32-scalar-magic.c
+++ b/src/qc8-gemm/gen/2x4-minmax-fp32-scalar-lrintf.c
@@ -8,14 +8,13 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-
-#include <fp16.h>
+#include <math.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
-void xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_magic(
+void xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
@@ -98,7 +97,7 @@
vfpacc1x3 *= vscale3;
w = (const void*) ((const float*) w + 4);
- const float voutput_min_less_zero_point = params->scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->scalar_lrintf.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
@@ -108,7 +107,7 @@
vfpacc1x2 = math_max_f32(vfpacc1x2, voutput_min_less_zero_point);
vfpacc1x3 = math_max_f32(vfpacc1x3, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->scalar_lrintf.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
@@ -118,25 +117,24 @@
vfpacc1x2 = math_min_f32(vfpacc1x2, voutput_max_less_zero_point);
vfpacc1x3 = math_min_f32(vfpacc1x3, voutput_max_less_zero_point);
- const float vmagic_bias = params->scalar_magic.magic_bias;
- vfpacc0x0 += vmagic_bias;
- vfpacc0x1 += vmagic_bias;
- vfpacc0x2 += vmagic_bias;
- vfpacc0x3 += vmagic_bias;
- vfpacc1x0 += vmagic_bias;
- vfpacc1x1 += vmagic_bias;
- vfpacc1x2 += vmagic_bias;
- vfpacc1x3 += vmagic_bias;
+ const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
+ const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
+ const int32_t vrndacc0x2 = (int32_t) lrintf(vfpacc0x2);
+ const int32_t vrndacc0x3 = (int32_t) lrintf(vfpacc0x3);
+ const int32_t vrndacc1x0 = (int32_t) lrintf(vfpacc1x0);
+ const int32_t vrndacc1x1 = (int32_t) lrintf(vfpacc1x1);
+ const int32_t vrndacc1x2 = (int32_t) lrintf(vfpacc1x2);
+ const int32_t vrndacc1x3 = (int32_t) lrintf(vfpacc1x3);
- const int32_t vmagic_bias_less_output_zero_point = params->scalar_magic.magic_bias_less_output_zero_point;
- int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
- int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
- int32_t vout0x2 = (int32_t) fp32_to_bits(vfpacc0x2) - vmagic_bias_less_output_zero_point;
- int32_t vout0x3 = (int32_t) fp32_to_bits(vfpacc0x3) - vmagic_bias_less_output_zero_point;
- int32_t vout1x0 = (int32_t) fp32_to_bits(vfpacc1x0) - vmagic_bias_less_output_zero_point;
- int32_t vout1x1 = (int32_t) fp32_to_bits(vfpacc1x1) - vmagic_bias_less_output_zero_point;
- int32_t vout1x2 = (int32_t) fp32_to_bits(vfpacc1x2) - vmagic_bias_less_output_zero_point;
- int32_t vout1x3 = (int32_t) fp32_to_bits(vfpacc1x3) - vmagic_bias_less_output_zero_point;
+ const int32_t voutput_zero_point = params->scalar_lrintf.output_zero_point;
+ int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
+ int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
+ int32_t vout0x2 = vrndacc0x2 + voutput_zero_point;
+ int32_t vout0x3 = vrndacc0x3 + voutput_zero_point;
+ int32_t vout1x0 = vrndacc1x0 + voutput_zero_point;
+ int32_t vout1x1 = vrndacc1x1 + voutput_zero_point;
+ int32_t vout1x2 = vrndacc1x2 + voutput_zero_point;
+ int32_t vout1x3 = vrndacc1x3 + voutput_zero_point;
if XNN_LIKELY(nc >= 4) {
c0[0] = (int8_t) vout0x0;
diff --git a/src/qc8-gemm/gen/3x2-minmax-fp32-scalar-magic.c b/src/qc8-gemm/gen/3x2-minmax-fp32-scalar-fmagic.c
similarity index 92%
rename from src/qc8-gemm/gen/3x2-minmax-fp32-scalar-magic.c
rename to src/qc8-gemm/gen/3x2-minmax-fp32-scalar-fmagic.c
index 0441ca8..2866290 100644
--- a/src/qc8-gemm/gen/3x2-minmax-fp32-scalar-magic.c
+++ b/src/qc8-gemm/gen/3x2-minmax-fp32-scalar-fmagic.c
@@ -15,7 +15,7 @@
#include <xnnpack/gemm.h>
-void xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_magic(
+void xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic(
size_t mr,
size_t nc,
size_t kc,
@@ -94,7 +94,7 @@
vfpacc2x1 *= vscale1;
w = (const void*) ((const float*) w + 2);
- const float voutput_min_less_zero_point = params->scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point);
@@ -102,7 +102,7 @@
vfpacc2x0 = math_max_f32(vfpacc2x0, voutput_min_less_zero_point);
vfpacc2x1 = math_max_f32(vfpacc2x1, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point);
@@ -110,7 +110,7 @@
vfpacc2x0 = math_min_f32(vfpacc2x0, voutput_max_less_zero_point);
vfpacc2x1 = math_min_f32(vfpacc2x1, voutput_max_less_zero_point);
- const float vmagic_bias = params->scalar_magic.magic_bias;
+ const float vmagic_bias = params->scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc1x0 += vmagic_bias;
@@ -118,7 +118,7 @@
vfpacc2x0 += vmagic_bias;
vfpacc2x1 += vmagic_bias;
- const int32_t vmagic_bias_less_output_zero_point = params->scalar_magic.magic_bias_less_output_zero_point;
+ const int32_t vmagic_bias_less_output_zero_point = params->scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
int32_t vout1x0 = (int32_t) fp32_to_bits(vfpacc1x0) - vmagic_bias_less_output_zero_point;
diff --git a/src/qc8-gemm/gen/3x2-minmax-fp32-scalar-lrint.c b/src/qc8-gemm/gen/3x2-minmax-fp32-scalar-lrint.c
deleted file mode 100644
index d763ffa..0000000
--- a/src/qc8-gemm/gen/3x2-minmax-fp32-scalar-lrint.c
+++ /dev/null
@@ -1,154 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-gemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-#include <math.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint(
- size_t mr,
- size_t nc,
- size_t kc,
- const int8_t* restrict a,
- size_t a_stride,
- const void* restrict w,
- int8_t* restrict c,
- size_t cm_stride,
- size_t cn_stride,
- const union xnn_qs8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 3);
- assert(nc != 0);
- assert(kc != 0);
-
- const int8_t* a0 = a;
- int8_t* c0 = c;
- const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
- int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
- if XNN_UNPREDICTABLE(mr < 2) {
- a1 = a0;
- c1 = c0;
- }
- const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
- int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
- if XNN_UNPREDICTABLE(mr <= 2) {
- a2 = a1;
- c2 = c1;
- }
-
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- int32_t vacc1x0 = vacc0x0;
- int32_t vacc1x1 = vacc0x1;
- int32_t vacc2x0 = vacc0x0;
- int32_t vacc2x1 = vacc0x1;
- w = (const void*) ((const int32_t*) w + 2);
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) *a0++;
- const int32_t va1 = (int32_t) *a1++;
- const int32_t va2 = (int32_t) *a2++;
-
- const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
- const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
- w = (const void*) ((const int8_t*) w + 2);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
- vacc1x0 += va1 * vb0;
- vacc1x1 += va1 * vb1;
- vacc2x0 += va2 * vb0;
- vacc2x1 += va2 * vb1;
-
- k -= sizeof(int8_t);
- } while (k != 0);
-
- float vfpacc0x0 = (float) vacc0x0;
- float vfpacc0x1 = (float) vacc0x1;
- float vfpacc1x0 = (float) vacc1x0;
- float vfpacc1x1 = (float) vacc1x1;
- float vfpacc2x0 = (float) vacc2x0;
- float vfpacc2x1 = (float) vacc2x1;
-
- typedef XNN_UNALIGNED float unaligned_float;
- const float vscale0 = ((const unaligned_float*) w)[0];
- vfpacc0x0 *= vscale0;
- vfpacc1x0 *= vscale0;
- vfpacc2x0 *= vscale0;
- const float vscale1 = ((const unaligned_float*) w)[1];
- vfpacc0x1 *= vscale1;
- vfpacc1x1 *= vscale1;
- vfpacc2x1 *= vscale1;
- w = (const void*) ((const float*) w + 2);
-
- long vrndacc0x0 = lrintf(vfpacc0x0);
- long vrndacc0x1 = lrintf(vfpacc0x1);
- long vrndacc1x0 = lrintf(vfpacc1x0);
- long vrndacc1x1 = lrintf(vfpacc1x1);
- long vrndacc2x0 = lrintf(vfpacc2x0);
- long vrndacc2x1 = lrintf(vfpacc2x1);
-
- const long voutput_min_less_zero_point = params->scalar_lrint.output_min_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x1;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x1;
- vrndacc2x0 = XNN_UNPREDICTABLE(vrndacc2x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x0;
- vrndacc2x1 = XNN_UNPREDICTABLE(vrndacc2x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x1;
-
- const long voutput_max_less_zero_point = params->scalar_lrint.output_max_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x1;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x1;
- vrndacc2x0 = XNN_UNPREDICTABLE(vrndacc2x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x0;
- vrndacc2x1 = XNN_UNPREDICTABLE(vrndacc2x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x1;
-
- const int32_t voutput_zero_point = params->scalar_lrint.output_zero_point;
- int32_t vout0x0 = (int32_t) vrndacc0x0 + voutput_zero_point;
- int32_t vout0x1 = (int32_t) vrndacc0x1 + voutput_zero_point;
- int32_t vout1x0 = (int32_t) vrndacc1x0 + voutput_zero_point;
- int32_t vout1x1 = (int32_t) vrndacc1x1 + voutput_zero_point;
- int32_t vout2x0 = (int32_t) vrndacc2x0 + voutput_zero_point;
- int32_t vout2x1 = (int32_t) vrndacc2x1 + voutput_zero_point;
-
- if XNN_LIKELY(nc >= 2) {
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
- c1[0] = (int8_t) vout1x0;
- c1[1] = (int8_t) vout1x1;
- c2[0] = (int8_t) vout2x0;
- c2[1] = (int8_t) vout2x1;
-
- a0 = (const int8_t*) ((uintptr_t) a0 - kc);
- a1 = (const int8_t*) ((uintptr_t) a1 - kc);
- a2 = (const int8_t*) ((uintptr_t) a2 - kc);
-
- c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
- c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
- c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
-
- nc -= 2;
- } else {
- if (nc & 1) {
- c0[0] = (int8_t) vout0x0;
- c1[0] = (int8_t) vout1x0;
- c2[0] = (int8_t) vout2x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qc8-gemm/gen/3x2-minmax-fp32-scalar-magic.c b/src/qc8-gemm/gen/3x2-minmax-fp32-scalar-lrintf.c
similarity index 77%
copy from src/qc8-gemm/gen/3x2-minmax-fp32-scalar-magic.c
copy to src/qc8-gemm/gen/3x2-minmax-fp32-scalar-lrintf.c
index 0441ca8..9590a02 100644
--- a/src/qc8-gemm/gen/3x2-minmax-fp32-scalar-magic.c
+++ b/src/qc8-gemm/gen/3x2-minmax-fp32-scalar-lrintf.c
@@ -8,14 +8,13 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-
-#include <fp16.h>
+#include <math.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
-void xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_magic(
+void xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
@@ -94,7 +93,7 @@
vfpacc2x1 *= vscale1;
w = (const void*) ((const float*) w + 2);
- const float voutput_min_less_zero_point = params->scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->scalar_lrintf.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point);
@@ -102,7 +101,7 @@
vfpacc2x0 = math_max_f32(vfpacc2x0, voutput_min_less_zero_point);
vfpacc2x1 = math_max_f32(vfpacc2x1, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->scalar_lrintf.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point);
@@ -110,21 +109,20 @@
vfpacc2x0 = math_min_f32(vfpacc2x0, voutput_max_less_zero_point);
vfpacc2x1 = math_min_f32(vfpacc2x1, voutput_max_less_zero_point);
- const float vmagic_bias = params->scalar_magic.magic_bias;
- vfpacc0x0 += vmagic_bias;
- vfpacc0x1 += vmagic_bias;
- vfpacc1x0 += vmagic_bias;
- vfpacc1x1 += vmagic_bias;
- vfpacc2x0 += vmagic_bias;
- vfpacc2x1 += vmagic_bias;
+ const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
+ const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
+ const int32_t vrndacc1x0 = (int32_t) lrintf(vfpacc1x0);
+ const int32_t vrndacc1x1 = (int32_t) lrintf(vfpacc1x1);
+ const int32_t vrndacc2x0 = (int32_t) lrintf(vfpacc2x0);
+ const int32_t vrndacc2x1 = (int32_t) lrintf(vfpacc2x1);
- const int32_t vmagic_bias_less_output_zero_point = params->scalar_magic.magic_bias_less_output_zero_point;
- int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
- int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
- int32_t vout1x0 = (int32_t) fp32_to_bits(vfpacc1x0) - vmagic_bias_less_output_zero_point;
- int32_t vout1x1 = (int32_t) fp32_to_bits(vfpacc1x1) - vmagic_bias_less_output_zero_point;
- int32_t vout2x0 = (int32_t) fp32_to_bits(vfpacc2x0) - vmagic_bias_less_output_zero_point;
- int32_t vout2x1 = (int32_t) fp32_to_bits(vfpacc2x1) - vmagic_bias_less_output_zero_point;
+ const int32_t voutput_zero_point = params->scalar_lrintf.output_zero_point;
+ int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
+ int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
+ int32_t vout1x0 = vrndacc1x0 + voutput_zero_point;
+ int32_t vout1x1 = vrndacc1x1 + voutput_zero_point;
+ int32_t vout2x0 = vrndacc2x0 + voutput_zero_point;
+ int32_t vout2x1 = vrndacc2x1 + voutput_zero_point;
if XNN_LIKELY(nc >= 2) {
c0[0] = (int8_t) vout0x0;
diff --git a/src/qc8-gemm/gen/3x4-minmax-fp32-scalar-magic.c b/src/qc8-gemm/gen/3x4-minmax-fp32-scalar-fmagic.c
similarity index 95%
rename from src/qc8-gemm/gen/3x4-minmax-fp32-scalar-magic.c
rename to src/qc8-gemm/gen/3x4-minmax-fp32-scalar-fmagic.c
index d0dd95c..b5db9f1 100644
--- a/src/qc8-gemm/gen/3x4-minmax-fp32-scalar-magic.c
+++ b/src/qc8-gemm/gen/3x4-minmax-fp32-scalar-fmagic.c
@@ -15,7 +15,7 @@
#include <xnnpack/gemm.h>
-void xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_magic(
+void xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic(
size_t mr,
size_t nc,
size_t kc,
@@ -121,7 +121,7 @@
vfpacc2x3 *= vscale3;
w = (const void*) ((const float*) w + 4);
- const float voutput_min_less_zero_point = params->scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
@@ -135,7 +135,7 @@
vfpacc2x2 = math_max_f32(vfpacc2x2, voutput_min_less_zero_point);
vfpacc2x3 = math_max_f32(vfpacc2x3, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
@@ -149,7 +149,7 @@
vfpacc2x2 = math_min_f32(vfpacc2x2, voutput_max_less_zero_point);
vfpacc2x3 = math_min_f32(vfpacc2x3, voutput_max_less_zero_point);
- const float vmagic_bias = params->scalar_magic.magic_bias;
+ const float vmagic_bias = params->scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc0x2 += vmagic_bias;
@@ -163,7 +163,7 @@
vfpacc2x2 += vmagic_bias;
vfpacc2x3 += vmagic_bias;
- const int32_t vmagic_bias_less_output_zero_point = params->scalar_magic.magic_bias_less_output_zero_point;
+ const int32_t vmagic_bias_less_output_zero_point = params->scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
int32_t vout0x2 = (int32_t) fp32_to_bits(vfpacc0x2) - vmagic_bias_less_output_zero_point;
diff --git a/src/qc8-gemm/gen/3x4-minmax-fp32-scalar-lrint.c b/src/qc8-gemm/gen/3x4-minmax-fp32-scalar-lrint.c
deleted file mode 100644
index 7f4716d..0000000
--- a/src/qc8-gemm/gen/3x4-minmax-fp32-scalar-lrint.c
+++ /dev/null
@@ -1,225 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-gemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-#include <math.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint(
- size_t mr,
- size_t nc,
- size_t kc,
- const int8_t* restrict a,
- size_t a_stride,
- const void* restrict w,
- int8_t* restrict c,
- size_t cm_stride,
- size_t cn_stride,
- const union xnn_qs8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 3);
- assert(nc != 0);
- assert(kc != 0);
-
- const int8_t* a0 = a;
- int8_t* c0 = c;
- const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
- int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
- if XNN_UNPREDICTABLE(mr < 2) {
- a1 = a0;
- c1 = c0;
- }
- const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
- int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
- if XNN_UNPREDICTABLE(mr <= 2) {
- a2 = a1;
- c2 = c1;
- }
-
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- int32_t vacc0x2 = ((const int32_t*) w)[2];
- int32_t vacc0x3 = ((const int32_t*) w)[3];
- int32_t vacc1x0 = vacc0x0;
- int32_t vacc1x1 = vacc0x1;
- int32_t vacc1x2 = vacc0x2;
- int32_t vacc1x3 = vacc0x3;
- int32_t vacc2x0 = vacc0x0;
- int32_t vacc2x1 = vacc0x1;
- int32_t vacc2x2 = vacc0x2;
- int32_t vacc2x3 = vacc0x3;
- w = (const void*) ((const int32_t*) w + 4);
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) *a0++;
- const int32_t va1 = (int32_t) *a1++;
- const int32_t va2 = (int32_t) *a2++;
-
- const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
- const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
- const int32_t vb2 = (int32_t) ((const int8_t*) w)[2];
- const int32_t vb3 = (int32_t) ((const int8_t*) w)[3];
- w = (const void*) ((const int8_t*) w + 4);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
- vacc0x2 += va0 * vb2;
- vacc0x3 += va0 * vb3;
- vacc1x0 += va1 * vb0;
- vacc1x1 += va1 * vb1;
- vacc1x2 += va1 * vb2;
- vacc1x3 += va1 * vb3;
- vacc2x0 += va2 * vb0;
- vacc2x1 += va2 * vb1;
- vacc2x2 += va2 * vb2;
- vacc2x3 += va2 * vb3;
-
- k -= sizeof(int8_t);
- } while (k != 0);
-
- float vfpacc0x0 = (float) vacc0x0;
- float vfpacc0x1 = (float) vacc0x1;
- float vfpacc0x2 = (float) vacc0x2;
- float vfpacc0x3 = (float) vacc0x3;
- float vfpacc1x0 = (float) vacc1x0;
- float vfpacc1x1 = (float) vacc1x1;
- float vfpacc1x2 = (float) vacc1x2;
- float vfpacc1x3 = (float) vacc1x3;
- float vfpacc2x0 = (float) vacc2x0;
- float vfpacc2x1 = (float) vacc2x1;
- float vfpacc2x2 = (float) vacc2x2;
- float vfpacc2x3 = (float) vacc2x3;
-
- const float vscale0 = ((const float*) w)[0];
- vfpacc0x0 *= vscale0;
- vfpacc1x0 *= vscale0;
- vfpacc2x0 *= vscale0;
- const float vscale1 = ((const float*) w)[1];
- vfpacc0x1 *= vscale1;
- vfpacc1x1 *= vscale1;
- vfpacc2x1 *= vscale1;
- const float vscale2 = ((const float*) w)[2];
- vfpacc0x2 *= vscale2;
- vfpacc1x2 *= vscale2;
- vfpacc2x2 *= vscale2;
- const float vscale3 = ((const float*) w)[3];
- vfpacc0x3 *= vscale3;
- vfpacc1x3 *= vscale3;
- vfpacc2x3 *= vscale3;
- w = (const void*) ((const float*) w + 4);
-
- long vrndacc0x0 = lrintf(vfpacc0x0);
- long vrndacc0x1 = lrintf(vfpacc0x1);
- long vrndacc0x2 = lrintf(vfpacc0x2);
- long vrndacc0x3 = lrintf(vfpacc0x3);
- long vrndacc1x0 = lrintf(vfpacc1x0);
- long vrndacc1x1 = lrintf(vfpacc1x1);
- long vrndacc1x2 = lrintf(vfpacc1x2);
- long vrndacc1x3 = lrintf(vfpacc1x3);
- long vrndacc2x0 = lrintf(vfpacc2x0);
- long vrndacc2x1 = lrintf(vfpacc2x1);
- long vrndacc2x2 = lrintf(vfpacc2x2);
- long vrndacc2x3 = lrintf(vfpacc2x3);
-
- const long voutput_min_less_zero_point = params->scalar_lrint.output_min_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x1;
- vrndacc0x2 = XNN_UNPREDICTABLE(vrndacc0x2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x2;
- vrndacc0x3 = XNN_UNPREDICTABLE(vrndacc0x3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x3;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x1;
- vrndacc1x2 = XNN_UNPREDICTABLE(vrndacc1x2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x2;
- vrndacc1x3 = XNN_UNPREDICTABLE(vrndacc1x3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x3;
- vrndacc2x0 = XNN_UNPREDICTABLE(vrndacc2x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x0;
- vrndacc2x1 = XNN_UNPREDICTABLE(vrndacc2x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x1;
- vrndacc2x2 = XNN_UNPREDICTABLE(vrndacc2x2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x2;
- vrndacc2x3 = XNN_UNPREDICTABLE(vrndacc2x3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x3;
-
- const long voutput_max_less_zero_point = params->scalar_lrint.output_max_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x1;
- vrndacc0x2 = XNN_UNPREDICTABLE(vrndacc0x2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x2;
- vrndacc0x3 = XNN_UNPREDICTABLE(vrndacc0x3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x3;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x1;
- vrndacc1x2 = XNN_UNPREDICTABLE(vrndacc1x2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x2;
- vrndacc1x3 = XNN_UNPREDICTABLE(vrndacc1x3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x3;
- vrndacc2x0 = XNN_UNPREDICTABLE(vrndacc2x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x0;
- vrndacc2x1 = XNN_UNPREDICTABLE(vrndacc2x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x1;
- vrndacc2x2 = XNN_UNPREDICTABLE(vrndacc2x2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x2;
- vrndacc2x3 = XNN_UNPREDICTABLE(vrndacc2x3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x3;
-
- const int32_t voutput_zero_point = params->scalar_lrint.output_zero_point;
- int32_t vout0x0 = (int32_t) vrndacc0x0 + voutput_zero_point;
- int32_t vout0x1 = (int32_t) vrndacc0x1 + voutput_zero_point;
- int32_t vout0x2 = (int32_t) vrndacc0x2 + voutput_zero_point;
- int32_t vout0x3 = (int32_t) vrndacc0x3 + voutput_zero_point;
- int32_t vout1x0 = (int32_t) vrndacc1x0 + voutput_zero_point;
- int32_t vout1x1 = (int32_t) vrndacc1x1 + voutput_zero_point;
- int32_t vout1x2 = (int32_t) vrndacc1x2 + voutput_zero_point;
- int32_t vout1x3 = (int32_t) vrndacc1x3 + voutput_zero_point;
- int32_t vout2x0 = (int32_t) vrndacc2x0 + voutput_zero_point;
- int32_t vout2x1 = (int32_t) vrndacc2x1 + voutput_zero_point;
- int32_t vout2x2 = (int32_t) vrndacc2x2 + voutput_zero_point;
- int32_t vout2x3 = (int32_t) vrndacc2x3 + voutput_zero_point;
-
- if XNN_LIKELY(nc >= 4) {
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
- c0[2] = (int8_t) vout0x2;
- c0[3] = (int8_t) vout0x3;
- c1[0] = (int8_t) vout1x0;
- c1[1] = (int8_t) vout1x1;
- c1[2] = (int8_t) vout1x2;
- c1[3] = (int8_t) vout1x3;
- c2[0] = (int8_t) vout2x0;
- c2[1] = (int8_t) vout2x1;
- c2[2] = (int8_t) vout2x2;
- c2[3] = (int8_t) vout2x3;
-
- a0 = (const int8_t*) ((uintptr_t) a0 - kc);
- a1 = (const int8_t*) ((uintptr_t) a1 - kc);
- a2 = (const int8_t*) ((uintptr_t) a2 - kc);
-
- c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
- c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
- c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
-
- nc -= 4;
- } else {
- if (nc & 2) {
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
- vout0x0 = vout0x2;
- c0 += 2;
- c1[0] = (int8_t) vout1x0;
- c1[1] = (int8_t) vout1x1;
- vout1x0 = vout1x2;
- c1 += 2;
- c2[0] = (int8_t) vout2x0;
- c2[1] = (int8_t) vout2x1;
- vout2x0 = vout2x2;
- c2 += 2;
- }
- if (nc & 1) {
- c0[0] = (int8_t) vout0x0;
- c1[0] = (int8_t) vout1x0;
- c2[0] = (int8_t) vout2x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qc8-gemm/gen/3x4-minmax-fp32-scalar-magic.c b/src/qc8-gemm/gen/3x4-minmax-fp32-scalar-lrintf.c
similarity index 76%
copy from src/qc8-gemm/gen/3x4-minmax-fp32-scalar-magic.c
copy to src/qc8-gemm/gen/3x4-minmax-fp32-scalar-lrintf.c
index d0dd95c..ff64ea8 100644
--- a/src/qc8-gemm/gen/3x4-minmax-fp32-scalar-magic.c
+++ b/src/qc8-gemm/gen/3x4-minmax-fp32-scalar-lrintf.c
@@ -8,14 +8,13 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-
-#include <fp16.h>
+#include <math.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
-void xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_magic(
+void xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
@@ -121,7 +120,7 @@
vfpacc2x3 *= vscale3;
w = (const void*) ((const float*) w + 4);
- const float voutput_min_less_zero_point = params->scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->scalar_lrintf.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
@@ -135,7 +134,7 @@
vfpacc2x2 = math_max_f32(vfpacc2x2, voutput_min_less_zero_point);
vfpacc2x3 = math_max_f32(vfpacc2x3, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->scalar_lrintf.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
@@ -149,33 +148,32 @@
vfpacc2x2 = math_min_f32(vfpacc2x2, voutput_max_less_zero_point);
vfpacc2x3 = math_min_f32(vfpacc2x3, voutput_max_less_zero_point);
- const float vmagic_bias = params->scalar_magic.magic_bias;
- vfpacc0x0 += vmagic_bias;
- vfpacc0x1 += vmagic_bias;
- vfpacc0x2 += vmagic_bias;
- vfpacc0x3 += vmagic_bias;
- vfpacc1x0 += vmagic_bias;
- vfpacc1x1 += vmagic_bias;
- vfpacc1x2 += vmagic_bias;
- vfpacc1x3 += vmagic_bias;
- vfpacc2x0 += vmagic_bias;
- vfpacc2x1 += vmagic_bias;
- vfpacc2x2 += vmagic_bias;
- vfpacc2x3 += vmagic_bias;
+ const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
+ const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
+ const int32_t vrndacc0x2 = (int32_t) lrintf(vfpacc0x2);
+ const int32_t vrndacc0x3 = (int32_t) lrintf(vfpacc0x3);
+ const int32_t vrndacc1x0 = (int32_t) lrintf(vfpacc1x0);
+ const int32_t vrndacc1x1 = (int32_t) lrintf(vfpacc1x1);
+ const int32_t vrndacc1x2 = (int32_t) lrintf(vfpacc1x2);
+ const int32_t vrndacc1x3 = (int32_t) lrintf(vfpacc1x3);
+ const int32_t vrndacc2x0 = (int32_t) lrintf(vfpacc2x0);
+ const int32_t vrndacc2x1 = (int32_t) lrintf(vfpacc2x1);
+ const int32_t vrndacc2x2 = (int32_t) lrintf(vfpacc2x2);
+ const int32_t vrndacc2x3 = (int32_t) lrintf(vfpacc2x3);
- const int32_t vmagic_bias_less_output_zero_point = params->scalar_magic.magic_bias_less_output_zero_point;
- int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
- int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
- int32_t vout0x2 = (int32_t) fp32_to_bits(vfpacc0x2) - vmagic_bias_less_output_zero_point;
- int32_t vout0x3 = (int32_t) fp32_to_bits(vfpacc0x3) - vmagic_bias_less_output_zero_point;
- int32_t vout1x0 = (int32_t) fp32_to_bits(vfpacc1x0) - vmagic_bias_less_output_zero_point;
- int32_t vout1x1 = (int32_t) fp32_to_bits(vfpacc1x1) - vmagic_bias_less_output_zero_point;
- int32_t vout1x2 = (int32_t) fp32_to_bits(vfpacc1x2) - vmagic_bias_less_output_zero_point;
- int32_t vout1x3 = (int32_t) fp32_to_bits(vfpacc1x3) - vmagic_bias_less_output_zero_point;
- int32_t vout2x0 = (int32_t) fp32_to_bits(vfpacc2x0) - vmagic_bias_less_output_zero_point;
- int32_t vout2x1 = (int32_t) fp32_to_bits(vfpacc2x1) - vmagic_bias_less_output_zero_point;
- int32_t vout2x2 = (int32_t) fp32_to_bits(vfpacc2x2) - vmagic_bias_less_output_zero_point;
- int32_t vout2x3 = (int32_t) fp32_to_bits(vfpacc2x3) - vmagic_bias_less_output_zero_point;
+ const int32_t voutput_zero_point = params->scalar_lrintf.output_zero_point;
+ int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
+ int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
+ int32_t vout0x2 = vrndacc0x2 + voutput_zero_point;
+ int32_t vout0x3 = vrndacc0x3 + voutput_zero_point;
+ int32_t vout1x0 = vrndacc1x0 + voutput_zero_point;
+ int32_t vout1x1 = vrndacc1x1 + voutput_zero_point;
+ int32_t vout1x2 = vrndacc1x2 + voutput_zero_point;
+ int32_t vout1x3 = vrndacc1x3 + voutput_zero_point;
+ int32_t vout2x0 = vrndacc2x0 + voutput_zero_point;
+ int32_t vout2x1 = vrndacc2x1 + voutput_zero_point;
+ int32_t vout2x2 = vrndacc2x2 + voutput_zero_point;
+ int32_t vout2x3 = vrndacc2x3 + voutput_zero_point;
if XNN_LIKELY(nc >= 4) {
c0[0] = (int8_t) vout0x0;
diff --git a/src/qc8-gemm/gen/4x2-minmax-fp32-scalar-magic.c b/src/qc8-gemm/gen/4x2-minmax-fp32-scalar-fmagic.c
similarity index 94%
rename from src/qc8-gemm/gen/4x2-minmax-fp32-scalar-magic.c
rename to src/qc8-gemm/gen/4x2-minmax-fp32-scalar-fmagic.c
index b453532..3d3b5bc 100644
--- a/src/qc8-gemm/gen/4x2-minmax-fp32-scalar-magic.c
+++ b/src/qc8-gemm/gen/4x2-minmax-fp32-scalar-fmagic.c
@@ -15,7 +15,7 @@
#include <xnnpack/gemm.h>
-void xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_magic(
+void xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic(
size_t mr,
size_t nc,
size_t kc,
@@ -109,7 +109,7 @@
vfpacc3x1 *= vscale1;
w = (const void*) ((const float*) w + 2);
- const float voutput_min_less_zero_point = params->scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point);
@@ -119,7 +119,7 @@
vfpacc3x0 = math_max_f32(vfpacc3x0, voutput_min_less_zero_point);
vfpacc3x1 = math_max_f32(vfpacc3x1, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point);
@@ -129,7 +129,7 @@
vfpacc3x0 = math_min_f32(vfpacc3x0, voutput_max_less_zero_point);
vfpacc3x1 = math_min_f32(vfpacc3x1, voutput_max_less_zero_point);
- const float vmagic_bias = params->scalar_magic.magic_bias;
+ const float vmagic_bias = params->scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc1x0 += vmagic_bias;
@@ -139,7 +139,7 @@
vfpacc3x0 += vmagic_bias;
vfpacc3x1 += vmagic_bias;
- const int32_t vmagic_bias_less_output_zero_point = params->scalar_magic.magic_bias_less_output_zero_point;
+ const int32_t vmagic_bias_less_output_zero_point = params->scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
int32_t vout1x0 = (int32_t) fp32_to_bits(vfpacc1x0) - vmagic_bias_less_output_zero_point;
diff --git a/src/qc8-gemm/gen/4x2-minmax-fp32-scalar-lrint.c b/src/qc8-gemm/gen/4x2-minmax-fp32-scalar-lrint.c
deleted file mode 100644
index ca0ddef..0000000
--- a/src/qc8-gemm/gen/4x2-minmax-fp32-scalar-lrint.c
+++ /dev/null
@@ -1,182 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-gemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-#include <math.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint(
- size_t mr,
- size_t nc,
- size_t kc,
- const int8_t* restrict a,
- size_t a_stride,
- const void* restrict w,
- int8_t* restrict c,
- size_t cm_stride,
- size_t cn_stride,
- const union xnn_qs8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 4);
- assert(nc != 0);
- assert(kc != 0);
-
- const int8_t* a0 = a;
- int8_t* c0 = c;
- const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
- int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
- if XNN_UNPREDICTABLE(mr < 2) {
- a1 = a0;
- c1 = c0;
- }
- const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
- int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
- if XNN_UNPREDICTABLE(mr <= 2) {
- a2 = a1;
- c2 = c1;
- }
- const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
- int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
- if XNN_UNPREDICTABLE(mr != 4) {
- a3 = a2;
- c3 = c2;
- }
-
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- int32_t vacc1x0 = vacc0x0;
- int32_t vacc1x1 = vacc0x1;
- int32_t vacc2x0 = vacc0x0;
- int32_t vacc2x1 = vacc0x1;
- int32_t vacc3x0 = vacc0x0;
- int32_t vacc3x1 = vacc0x1;
- w = (const void*) ((const int32_t*) w + 2);
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) *a0++;
- const int32_t va1 = (int32_t) *a1++;
- const int32_t va2 = (int32_t) *a2++;
- const int32_t va3 = (int32_t) *a3++;
-
- const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
- const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
- w = (const void*) ((const int8_t*) w + 2);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
- vacc1x0 += va1 * vb0;
- vacc1x1 += va1 * vb1;
- vacc2x0 += va2 * vb0;
- vacc2x1 += va2 * vb1;
- vacc3x0 += va3 * vb0;
- vacc3x1 += va3 * vb1;
-
- k -= sizeof(int8_t);
- } while (k != 0);
-
- float vfpacc0x0 = (float) vacc0x0;
- float vfpacc0x1 = (float) vacc0x1;
- float vfpacc1x0 = (float) vacc1x0;
- float vfpacc1x1 = (float) vacc1x1;
- float vfpacc2x0 = (float) vacc2x0;
- float vfpacc2x1 = (float) vacc2x1;
- float vfpacc3x0 = (float) vacc3x0;
- float vfpacc3x1 = (float) vacc3x1;
-
- typedef XNN_UNALIGNED float unaligned_float;
- const float vscale0 = ((const unaligned_float*) w)[0];
- vfpacc0x0 *= vscale0;
- vfpacc1x0 *= vscale0;
- vfpacc2x0 *= vscale0;
- vfpacc3x0 *= vscale0;
- const float vscale1 = ((const unaligned_float*) w)[1];
- vfpacc0x1 *= vscale1;
- vfpacc1x1 *= vscale1;
- vfpacc2x1 *= vscale1;
- vfpacc3x1 *= vscale1;
- w = (const void*) ((const float*) w + 2);
-
- long vrndacc0x0 = lrintf(vfpacc0x0);
- long vrndacc0x1 = lrintf(vfpacc0x1);
- long vrndacc1x0 = lrintf(vfpacc1x0);
- long vrndacc1x1 = lrintf(vfpacc1x1);
- long vrndacc2x0 = lrintf(vfpacc2x0);
- long vrndacc2x1 = lrintf(vfpacc2x1);
- long vrndacc3x0 = lrintf(vfpacc3x0);
- long vrndacc3x1 = lrintf(vfpacc3x1);
-
- const long voutput_min_less_zero_point = params->scalar_lrint.output_min_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x1;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x1;
- vrndacc2x0 = XNN_UNPREDICTABLE(vrndacc2x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x0;
- vrndacc2x1 = XNN_UNPREDICTABLE(vrndacc2x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x1;
- vrndacc3x0 = XNN_UNPREDICTABLE(vrndacc3x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc3x0;
- vrndacc3x1 = XNN_UNPREDICTABLE(vrndacc3x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc3x1;
-
- const long voutput_max_less_zero_point = params->scalar_lrint.output_max_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x1;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x1;
- vrndacc2x0 = XNN_UNPREDICTABLE(vrndacc2x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x0;
- vrndacc2x1 = XNN_UNPREDICTABLE(vrndacc2x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x1;
- vrndacc3x0 = XNN_UNPREDICTABLE(vrndacc3x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc3x0;
- vrndacc3x1 = XNN_UNPREDICTABLE(vrndacc3x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc3x1;
-
- const int32_t voutput_zero_point = params->scalar_lrint.output_zero_point;
- int32_t vout0x0 = (int32_t) vrndacc0x0 + voutput_zero_point;
- int32_t vout0x1 = (int32_t) vrndacc0x1 + voutput_zero_point;
- int32_t vout1x0 = (int32_t) vrndacc1x0 + voutput_zero_point;
- int32_t vout1x1 = (int32_t) vrndacc1x1 + voutput_zero_point;
- int32_t vout2x0 = (int32_t) vrndacc2x0 + voutput_zero_point;
- int32_t vout2x1 = (int32_t) vrndacc2x1 + voutput_zero_point;
- int32_t vout3x0 = (int32_t) vrndacc3x0 + voutput_zero_point;
- int32_t vout3x1 = (int32_t) vrndacc3x1 + voutput_zero_point;
-
- if XNN_LIKELY(nc >= 2) {
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
- c1[0] = (int8_t) vout1x0;
- c1[1] = (int8_t) vout1x1;
- c2[0] = (int8_t) vout2x0;
- c2[1] = (int8_t) vout2x1;
- c3[0] = (int8_t) vout3x0;
- c3[1] = (int8_t) vout3x1;
-
- a0 = (const int8_t*) ((uintptr_t) a0 - kc);
- a1 = (const int8_t*) ((uintptr_t) a1 - kc);
- a2 = (const int8_t*) ((uintptr_t) a2 - kc);
- a3 = (const int8_t*) ((uintptr_t) a3 - kc);
-
- c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
- c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
- c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
- c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
-
- nc -= 2;
- } else {
- if (nc & 1) {
- c0[0] = (int8_t) vout0x0;
- c1[0] = (int8_t) vout1x0;
- c2[0] = (int8_t) vout2x0;
- c3[0] = (int8_t) vout3x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qc8-gemm/gen/4x2-minmax-fp32-scalar-magic.c b/src/qc8-gemm/gen/4x2-minmax-fp32-scalar-lrintf.c
similarity index 77%
copy from src/qc8-gemm/gen/4x2-minmax-fp32-scalar-magic.c
copy to src/qc8-gemm/gen/4x2-minmax-fp32-scalar-lrintf.c
index b453532..c95becb 100644
--- a/src/qc8-gemm/gen/4x2-minmax-fp32-scalar-magic.c
+++ b/src/qc8-gemm/gen/4x2-minmax-fp32-scalar-lrintf.c
@@ -8,14 +8,13 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-
-#include <fp16.h>
+#include <math.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
-void xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_magic(
+void xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
@@ -109,7 +108,7 @@
vfpacc3x1 *= vscale1;
w = (const void*) ((const float*) w + 2);
- const float voutput_min_less_zero_point = params->scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->scalar_lrintf.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point);
@@ -119,7 +118,7 @@
vfpacc3x0 = math_max_f32(vfpacc3x0, voutput_min_less_zero_point);
vfpacc3x1 = math_max_f32(vfpacc3x1, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->scalar_lrintf.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point);
@@ -129,25 +128,24 @@
vfpacc3x0 = math_min_f32(vfpacc3x0, voutput_max_less_zero_point);
vfpacc3x1 = math_min_f32(vfpacc3x1, voutput_max_less_zero_point);
- const float vmagic_bias = params->scalar_magic.magic_bias;
- vfpacc0x0 += vmagic_bias;
- vfpacc0x1 += vmagic_bias;
- vfpacc1x0 += vmagic_bias;
- vfpacc1x1 += vmagic_bias;
- vfpacc2x0 += vmagic_bias;
- vfpacc2x1 += vmagic_bias;
- vfpacc3x0 += vmagic_bias;
- vfpacc3x1 += vmagic_bias;
+ const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
+ const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
+ const int32_t vrndacc1x0 = (int32_t) lrintf(vfpacc1x0);
+ const int32_t vrndacc1x1 = (int32_t) lrintf(vfpacc1x1);
+ const int32_t vrndacc2x0 = (int32_t) lrintf(vfpacc2x0);
+ const int32_t vrndacc2x1 = (int32_t) lrintf(vfpacc2x1);
+ const int32_t vrndacc3x0 = (int32_t) lrintf(vfpacc3x0);
+ const int32_t vrndacc3x1 = (int32_t) lrintf(vfpacc3x1);
- const int32_t vmagic_bias_less_output_zero_point = params->scalar_magic.magic_bias_less_output_zero_point;
- int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
- int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
- int32_t vout1x0 = (int32_t) fp32_to_bits(vfpacc1x0) - vmagic_bias_less_output_zero_point;
- int32_t vout1x1 = (int32_t) fp32_to_bits(vfpacc1x1) - vmagic_bias_less_output_zero_point;
- int32_t vout2x0 = (int32_t) fp32_to_bits(vfpacc2x0) - vmagic_bias_less_output_zero_point;
- int32_t vout2x1 = (int32_t) fp32_to_bits(vfpacc2x1) - vmagic_bias_less_output_zero_point;
- int32_t vout3x0 = (int32_t) fp32_to_bits(vfpacc3x0) - vmagic_bias_less_output_zero_point;
- int32_t vout3x1 = (int32_t) fp32_to_bits(vfpacc3x1) - vmagic_bias_less_output_zero_point;
+ const int32_t voutput_zero_point = params->scalar_lrintf.output_zero_point;
+ int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
+ int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
+ int32_t vout1x0 = vrndacc1x0 + voutput_zero_point;
+ int32_t vout1x1 = vrndacc1x1 + voutput_zero_point;
+ int32_t vout2x0 = vrndacc2x0 + voutput_zero_point;
+ int32_t vout2x1 = vrndacc2x1 + voutput_zero_point;
+ int32_t vout3x0 = vrndacc3x0 + voutput_zero_point;
+ int32_t vout3x1 = vrndacc3x1 + voutput_zero_point;
if XNN_LIKELY(nc >= 2) {
c0[0] = (int8_t) vout0x0;
diff --git a/src/qc8-gemm/gen/4x4-minmax-fp32-scalar-magic.c b/src/qc8-gemm/gen/4x4-minmax-fp32-scalar-fmagic.c
similarity index 96%
rename from src/qc8-gemm/gen/4x4-minmax-fp32-scalar-magic.c
rename to src/qc8-gemm/gen/4x4-minmax-fp32-scalar-fmagic.c
index 9483acb..aff23b7 100644
--- a/src/qc8-gemm/gen/4x4-minmax-fp32-scalar-magic.c
+++ b/src/qc8-gemm/gen/4x4-minmax-fp32-scalar-fmagic.c
@@ -15,7 +15,7 @@
#include <xnnpack/gemm.h>
-void xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_magic(
+void xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic(
size_t mr,
size_t nc,
size_t kc,
@@ -144,7 +144,7 @@
vfpacc3x3 *= vscale3;
w = (const void*) ((const float*) w + 4);
- const float voutput_min_less_zero_point = params->scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
@@ -162,7 +162,7 @@
vfpacc3x2 = math_max_f32(vfpacc3x2, voutput_min_less_zero_point);
vfpacc3x3 = math_max_f32(vfpacc3x3, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
@@ -180,7 +180,7 @@
vfpacc3x2 = math_min_f32(vfpacc3x2, voutput_max_less_zero_point);
vfpacc3x3 = math_min_f32(vfpacc3x3, voutput_max_less_zero_point);
- const float vmagic_bias = params->scalar_magic.magic_bias;
+ const float vmagic_bias = params->scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc0x2 += vmagic_bias;
@@ -198,7 +198,7 @@
vfpacc3x2 += vmagic_bias;
vfpacc3x3 += vmagic_bias;
- const int32_t vmagic_bias_less_output_zero_point = params->scalar_magic.magic_bias_less_output_zero_point;
+ const int32_t vmagic_bias_less_output_zero_point = params->scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
int32_t vout0x2 = (int32_t) fp32_to_bits(vfpacc0x2) - vmagic_bias_less_output_zero_point;
diff --git a/src/qc8-gemm/gen/4x4-minmax-fp32-scalar-lrint.c b/src/qc8-gemm/gen/4x4-minmax-fp32-scalar-lrint.c
deleted file mode 100644
index b2bf401..0000000
--- a/src/qc8-gemm/gen/4x4-minmax-fp32-scalar-lrint.c
+++ /dev/null
@@ -1,275 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-gemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-#include <math.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint(
- size_t mr,
- size_t nc,
- size_t kc,
- const int8_t* restrict a,
- size_t a_stride,
- const void* restrict w,
- int8_t* restrict c,
- size_t cm_stride,
- size_t cn_stride,
- const union xnn_qs8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 4);
- assert(nc != 0);
- assert(kc != 0);
-
- const int8_t* a0 = a;
- int8_t* c0 = c;
- const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
- int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
- if XNN_UNPREDICTABLE(mr < 2) {
- a1 = a0;
- c1 = c0;
- }
- const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
- int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
- if XNN_UNPREDICTABLE(mr <= 2) {
- a2 = a1;
- c2 = c1;
- }
- const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
- int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
- if XNN_UNPREDICTABLE(mr != 4) {
- a3 = a2;
- c3 = c2;
- }
-
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- int32_t vacc0x2 = ((const int32_t*) w)[2];
- int32_t vacc0x3 = ((const int32_t*) w)[3];
- int32_t vacc1x0 = vacc0x0;
- int32_t vacc1x1 = vacc0x1;
- int32_t vacc1x2 = vacc0x2;
- int32_t vacc1x3 = vacc0x3;
- int32_t vacc2x0 = vacc0x0;
- int32_t vacc2x1 = vacc0x1;
- int32_t vacc2x2 = vacc0x2;
- int32_t vacc2x3 = vacc0x3;
- int32_t vacc3x0 = vacc0x0;
- int32_t vacc3x1 = vacc0x1;
- int32_t vacc3x2 = vacc0x2;
- int32_t vacc3x3 = vacc0x3;
- w = (const void*) ((const int32_t*) w + 4);
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) *a0++;
- const int32_t va1 = (int32_t) *a1++;
- const int32_t va2 = (int32_t) *a2++;
- const int32_t va3 = (int32_t) *a3++;
-
- const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
- const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
- const int32_t vb2 = (int32_t) ((const int8_t*) w)[2];
- const int32_t vb3 = (int32_t) ((const int8_t*) w)[3];
- w = (const void*) ((const int8_t*) w + 4);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
- vacc0x2 += va0 * vb2;
- vacc0x3 += va0 * vb3;
- vacc1x0 += va1 * vb0;
- vacc1x1 += va1 * vb1;
- vacc1x2 += va1 * vb2;
- vacc1x3 += va1 * vb3;
- vacc2x0 += va2 * vb0;
- vacc2x1 += va2 * vb1;
- vacc2x2 += va2 * vb2;
- vacc2x3 += va2 * vb3;
- vacc3x0 += va3 * vb0;
- vacc3x1 += va3 * vb1;
- vacc3x2 += va3 * vb2;
- vacc3x3 += va3 * vb3;
-
- k -= sizeof(int8_t);
- } while (k != 0);
-
- float vfpacc0x0 = (float) vacc0x0;
- float vfpacc0x1 = (float) vacc0x1;
- float vfpacc0x2 = (float) vacc0x2;
- float vfpacc0x3 = (float) vacc0x3;
- float vfpacc1x0 = (float) vacc1x0;
- float vfpacc1x1 = (float) vacc1x1;
- float vfpacc1x2 = (float) vacc1x2;
- float vfpacc1x3 = (float) vacc1x3;
- float vfpacc2x0 = (float) vacc2x0;
- float vfpacc2x1 = (float) vacc2x1;
- float vfpacc2x2 = (float) vacc2x2;
- float vfpacc2x3 = (float) vacc2x3;
- float vfpacc3x0 = (float) vacc3x0;
- float vfpacc3x1 = (float) vacc3x1;
- float vfpacc3x2 = (float) vacc3x2;
- float vfpacc3x3 = (float) vacc3x3;
-
- const float vscale0 = ((const float*) w)[0];
- vfpacc0x0 *= vscale0;
- vfpacc1x0 *= vscale0;
- vfpacc2x0 *= vscale0;
- vfpacc3x0 *= vscale0;
- const float vscale1 = ((const float*) w)[1];
- vfpacc0x1 *= vscale1;
- vfpacc1x1 *= vscale1;
- vfpacc2x1 *= vscale1;
- vfpacc3x1 *= vscale1;
- const float vscale2 = ((const float*) w)[2];
- vfpacc0x2 *= vscale2;
- vfpacc1x2 *= vscale2;
- vfpacc2x2 *= vscale2;
- vfpacc3x2 *= vscale2;
- const float vscale3 = ((const float*) w)[3];
- vfpacc0x3 *= vscale3;
- vfpacc1x3 *= vscale3;
- vfpacc2x3 *= vscale3;
- vfpacc3x3 *= vscale3;
- w = (const void*) ((const float*) w + 4);
-
- long vrndacc0x0 = lrintf(vfpacc0x0);
- long vrndacc0x1 = lrintf(vfpacc0x1);
- long vrndacc0x2 = lrintf(vfpacc0x2);
- long vrndacc0x3 = lrintf(vfpacc0x3);
- long vrndacc1x0 = lrintf(vfpacc1x0);
- long vrndacc1x1 = lrintf(vfpacc1x1);
- long vrndacc1x2 = lrintf(vfpacc1x2);
- long vrndacc1x3 = lrintf(vfpacc1x3);
- long vrndacc2x0 = lrintf(vfpacc2x0);
- long vrndacc2x1 = lrintf(vfpacc2x1);
- long vrndacc2x2 = lrintf(vfpacc2x2);
- long vrndacc2x3 = lrintf(vfpacc2x3);
- long vrndacc3x0 = lrintf(vfpacc3x0);
- long vrndacc3x1 = lrintf(vfpacc3x1);
- long vrndacc3x2 = lrintf(vfpacc3x2);
- long vrndacc3x3 = lrintf(vfpacc3x3);
-
- const long voutput_min_less_zero_point = params->scalar_lrint.output_min_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x1;
- vrndacc0x2 = XNN_UNPREDICTABLE(vrndacc0x2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x2;
- vrndacc0x3 = XNN_UNPREDICTABLE(vrndacc0x3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x3;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x1;
- vrndacc1x2 = XNN_UNPREDICTABLE(vrndacc1x2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x2;
- vrndacc1x3 = XNN_UNPREDICTABLE(vrndacc1x3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x3;
- vrndacc2x0 = XNN_UNPREDICTABLE(vrndacc2x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x0;
- vrndacc2x1 = XNN_UNPREDICTABLE(vrndacc2x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x1;
- vrndacc2x2 = XNN_UNPREDICTABLE(vrndacc2x2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x2;
- vrndacc2x3 = XNN_UNPREDICTABLE(vrndacc2x3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x3;
- vrndacc3x0 = XNN_UNPREDICTABLE(vrndacc3x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc3x0;
- vrndacc3x1 = XNN_UNPREDICTABLE(vrndacc3x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc3x1;
- vrndacc3x2 = XNN_UNPREDICTABLE(vrndacc3x2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc3x2;
- vrndacc3x3 = XNN_UNPREDICTABLE(vrndacc3x3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc3x3;
-
- const long voutput_max_less_zero_point = params->scalar_lrint.output_max_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x1;
- vrndacc0x2 = XNN_UNPREDICTABLE(vrndacc0x2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x2;
- vrndacc0x3 = XNN_UNPREDICTABLE(vrndacc0x3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x3;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x1;
- vrndacc1x2 = XNN_UNPREDICTABLE(vrndacc1x2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x2;
- vrndacc1x3 = XNN_UNPREDICTABLE(vrndacc1x3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x3;
- vrndacc2x0 = XNN_UNPREDICTABLE(vrndacc2x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x0;
- vrndacc2x1 = XNN_UNPREDICTABLE(vrndacc2x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x1;
- vrndacc2x2 = XNN_UNPREDICTABLE(vrndacc2x2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x2;
- vrndacc2x3 = XNN_UNPREDICTABLE(vrndacc2x3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x3;
- vrndacc3x0 = XNN_UNPREDICTABLE(vrndacc3x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc3x0;
- vrndacc3x1 = XNN_UNPREDICTABLE(vrndacc3x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc3x1;
- vrndacc3x2 = XNN_UNPREDICTABLE(vrndacc3x2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc3x2;
- vrndacc3x3 = XNN_UNPREDICTABLE(vrndacc3x3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc3x3;
-
- const int32_t voutput_zero_point = params->scalar_lrint.output_zero_point;
- int32_t vout0x0 = (int32_t) vrndacc0x0 + voutput_zero_point;
- int32_t vout0x1 = (int32_t) vrndacc0x1 + voutput_zero_point;
- int32_t vout0x2 = (int32_t) vrndacc0x2 + voutput_zero_point;
- int32_t vout0x3 = (int32_t) vrndacc0x3 + voutput_zero_point;
- int32_t vout1x0 = (int32_t) vrndacc1x0 + voutput_zero_point;
- int32_t vout1x1 = (int32_t) vrndacc1x1 + voutput_zero_point;
- int32_t vout1x2 = (int32_t) vrndacc1x2 + voutput_zero_point;
- int32_t vout1x3 = (int32_t) vrndacc1x3 + voutput_zero_point;
- int32_t vout2x0 = (int32_t) vrndacc2x0 + voutput_zero_point;
- int32_t vout2x1 = (int32_t) vrndacc2x1 + voutput_zero_point;
- int32_t vout2x2 = (int32_t) vrndacc2x2 + voutput_zero_point;
- int32_t vout2x3 = (int32_t) vrndacc2x3 + voutput_zero_point;
- int32_t vout3x0 = (int32_t) vrndacc3x0 + voutput_zero_point;
- int32_t vout3x1 = (int32_t) vrndacc3x1 + voutput_zero_point;
- int32_t vout3x2 = (int32_t) vrndacc3x2 + voutput_zero_point;
- int32_t vout3x3 = (int32_t) vrndacc3x3 + voutput_zero_point;
-
- if XNN_LIKELY(nc >= 4) {
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
- c0[2] = (int8_t) vout0x2;
- c0[3] = (int8_t) vout0x3;
- c1[0] = (int8_t) vout1x0;
- c1[1] = (int8_t) vout1x1;
- c1[2] = (int8_t) vout1x2;
- c1[3] = (int8_t) vout1x3;
- c2[0] = (int8_t) vout2x0;
- c2[1] = (int8_t) vout2x1;
- c2[2] = (int8_t) vout2x2;
- c2[3] = (int8_t) vout2x3;
- c3[0] = (int8_t) vout3x0;
- c3[1] = (int8_t) vout3x1;
- c3[2] = (int8_t) vout3x2;
- c3[3] = (int8_t) vout3x3;
-
- a0 = (const int8_t*) ((uintptr_t) a0 - kc);
- a1 = (const int8_t*) ((uintptr_t) a1 - kc);
- a2 = (const int8_t*) ((uintptr_t) a2 - kc);
- a3 = (const int8_t*) ((uintptr_t) a3 - kc);
-
- c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
- c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
- c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
- c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
-
- nc -= 4;
- } else {
- if (nc & 2) {
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
- vout0x0 = vout0x2;
- c0 += 2;
- c1[0] = (int8_t) vout1x0;
- c1[1] = (int8_t) vout1x1;
- vout1x0 = vout1x2;
- c1 += 2;
- c2[0] = (int8_t) vout2x0;
- c2[1] = (int8_t) vout2x1;
- vout2x0 = vout2x2;
- c2 += 2;
- c3[0] = (int8_t) vout3x0;
- c3[1] = (int8_t) vout3x1;
- vout3x0 = vout3x2;
- c3 += 2;
- }
- if (nc & 1) {
- c0[0] = (int8_t) vout0x0;
- c1[0] = (int8_t) vout1x0;
- c2[0] = (int8_t) vout2x0;
- c3[0] = (int8_t) vout3x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qc8-gemm/gen/4x4-minmax-fp32-scalar-magic.c b/src/qc8-gemm/gen/4x4-minmax-fp32-scalar-lrintf.c
similarity index 76%
copy from src/qc8-gemm/gen/4x4-minmax-fp32-scalar-magic.c
copy to src/qc8-gemm/gen/4x4-minmax-fp32-scalar-lrintf.c
index 9483acb..0664db3 100644
--- a/src/qc8-gemm/gen/4x4-minmax-fp32-scalar-magic.c
+++ b/src/qc8-gemm/gen/4x4-minmax-fp32-scalar-lrintf.c
@@ -8,14 +8,13 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-
-#include <fp16.h>
+#include <math.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
-void xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_magic(
+void xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
@@ -144,7 +143,7 @@
vfpacc3x3 *= vscale3;
w = (const void*) ((const float*) w + 4);
- const float voutput_min_less_zero_point = params->scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->scalar_lrintf.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
@@ -162,7 +161,7 @@
vfpacc3x2 = math_max_f32(vfpacc3x2, voutput_min_less_zero_point);
vfpacc3x3 = math_max_f32(vfpacc3x3, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->scalar_lrintf.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
@@ -180,41 +179,40 @@
vfpacc3x2 = math_min_f32(vfpacc3x2, voutput_max_less_zero_point);
vfpacc3x3 = math_min_f32(vfpacc3x3, voutput_max_less_zero_point);
- const float vmagic_bias = params->scalar_magic.magic_bias;
- vfpacc0x0 += vmagic_bias;
- vfpacc0x1 += vmagic_bias;
- vfpacc0x2 += vmagic_bias;
- vfpacc0x3 += vmagic_bias;
- vfpacc1x0 += vmagic_bias;
- vfpacc1x1 += vmagic_bias;
- vfpacc1x2 += vmagic_bias;
- vfpacc1x3 += vmagic_bias;
- vfpacc2x0 += vmagic_bias;
- vfpacc2x1 += vmagic_bias;
- vfpacc2x2 += vmagic_bias;
- vfpacc2x3 += vmagic_bias;
- vfpacc3x0 += vmagic_bias;
- vfpacc3x1 += vmagic_bias;
- vfpacc3x2 += vmagic_bias;
- vfpacc3x3 += vmagic_bias;
+ const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
+ const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
+ const int32_t vrndacc0x2 = (int32_t) lrintf(vfpacc0x2);
+ const int32_t vrndacc0x3 = (int32_t) lrintf(vfpacc0x3);
+ const int32_t vrndacc1x0 = (int32_t) lrintf(vfpacc1x0);
+ const int32_t vrndacc1x1 = (int32_t) lrintf(vfpacc1x1);
+ const int32_t vrndacc1x2 = (int32_t) lrintf(vfpacc1x2);
+ const int32_t vrndacc1x3 = (int32_t) lrintf(vfpacc1x3);
+ const int32_t vrndacc2x0 = (int32_t) lrintf(vfpacc2x0);
+ const int32_t vrndacc2x1 = (int32_t) lrintf(vfpacc2x1);
+ const int32_t vrndacc2x2 = (int32_t) lrintf(vfpacc2x2);
+ const int32_t vrndacc2x3 = (int32_t) lrintf(vfpacc2x3);
+ const int32_t vrndacc3x0 = (int32_t) lrintf(vfpacc3x0);
+ const int32_t vrndacc3x1 = (int32_t) lrintf(vfpacc3x1);
+ const int32_t vrndacc3x2 = (int32_t) lrintf(vfpacc3x2);
+ const int32_t vrndacc3x3 = (int32_t) lrintf(vfpacc3x3);
- const int32_t vmagic_bias_less_output_zero_point = params->scalar_magic.magic_bias_less_output_zero_point;
- int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
- int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
- int32_t vout0x2 = (int32_t) fp32_to_bits(vfpacc0x2) - vmagic_bias_less_output_zero_point;
- int32_t vout0x3 = (int32_t) fp32_to_bits(vfpacc0x3) - vmagic_bias_less_output_zero_point;
- int32_t vout1x0 = (int32_t) fp32_to_bits(vfpacc1x0) - vmagic_bias_less_output_zero_point;
- int32_t vout1x1 = (int32_t) fp32_to_bits(vfpacc1x1) - vmagic_bias_less_output_zero_point;
- int32_t vout1x2 = (int32_t) fp32_to_bits(vfpacc1x2) - vmagic_bias_less_output_zero_point;
- int32_t vout1x3 = (int32_t) fp32_to_bits(vfpacc1x3) - vmagic_bias_less_output_zero_point;
- int32_t vout2x0 = (int32_t) fp32_to_bits(vfpacc2x0) - vmagic_bias_less_output_zero_point;
- int32_t vout2x1 = (int32_t) fp32_to_bits(vfpacc2x1) - vmagic_bias_less_output_zero_point;
- int32_t vout2x2 = (int32_t) fp32_to_bits(vfpacc2x2) - vmagic_bias_less_output_zero_point;
- int32_t vout2x3 = (int32_t) fp32_to_bits(vfpacc2x3) - vmagic_bias_less_output_zero_point;
- int32_t vout3x0 = (int32_t) fp32_to_bits(vfpacc3x0) - vmagic_bias_less_output_zero_point;
- int32_t vout3x1 = (int32_t) fp32_to_bits(vfpacc3x1) - vmagic_bias_less_output_zero_point;
- int32_t vout3x2 = (int32_t) fp32_to_bits(vfpacc3x2) - vmagic_bias_less_output_zero_point;
- int32_t vout3x3 = (int32_t) fp32_to_bits(vfpacc3x3) - vmagic_bias_less_output_zero_point;
+ const int32_t voutput_zero_point = params->scalar_lrintf.output_zero_point;
+ int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
+ int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
+ int32_t vout0x2 = vrndacc0x2 + voutput_zero_point;
+ int32_t vout0x3 = vrndacc0x3 + voutput_zero_point;
+ int32_t vout1x0 = vrndacc1x0 + voutput_zero_point;
+ int32_t vout1x1 = vrndacc1x1 + voutput_zero_point;
+ int32_t vout1x2 = vrndacc1x2 + voutput_zero_point;
+ int32_t vout1x3 = vrndacc1x3 + voutput_zero_point;
+ int32_t vout2x0 = vrndacc2x0 + voutput_zero_point;
+ int32_t vout2x1 = vrndacc2x1 + voutput_zero_point;
+ int32_t vout2x2 = vrndacc2x2 + voutput_zero_point;
+ int32_t vout2x3 = vrndacc2x3 + voutput_zero_point;
+ int32_t vout3x0 = vrndacc3x0 + voutput_zero_point;
+ int32_t vout3x1 = vrndacc3x1 + voutput_zero_point;
+ int32_t vout3x2 = vrndacc3x2 + voutput_zero_point;
+ int32_t vout3x3 = vrndacc3x3 + voutput_zero_point;
if XNN_LIKELY(nc >= 4) {
c0[0] = (int8_t) vout0x0;
diff --git a/src/qc8-igemm/gen/1x2-minmax-fp32-scalar-magic.c b/src/qc8-igemm/gen/1x2-minmax-fp32-scalar-fmagic.c
similarity index 88%
rename from src/qc8-igemm/gen/1x2-minmax-fp32-scalar-magic.c
rename to src/qc8-igemm/gen/1x2-minmax-fp32-scalar-fmagic.c
index b10decc..0f461f2 100644
--- a/src/qc8-igemm/gen/1x2-minmax-fp32-scalar-magic.c
+++ b/src/qc8-igemm/gen/1x2-minmax-fp32-scalar-fmagic.c
@@ -15,7 +15,7 @@
#include <xnnpack/gemm.h>
-void xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_magic(
+void xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic(
size_t mr,
size_t nc,
size_t kc,
@@ -81,19 +81,19 @@
vfpacc0x1 *= vscale1;
w = (const void*) ((const float*) w + 2);
- const float voutput_min_less_zero_point = params->scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
- const float vmagic_bias = params->scalar_magic.magic_bias;
+ const float vmagic_bias = params->scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
- const int32_t vmagic_bias_less_output_zero_point = params->scalar_magic.magic_bias_less_output_zero_point;
+ const int32_t vmagic_bias_less_output_zero_point = params->scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
diff --git a/src/qc8-igemm/gen/1x2-minmax-fp32-scalar-lrint.c b/src/qc8-igemm/gen/1x2-minmax-fp32-scalar-lrint.c
deleted file mode 100644
index 755f901..0000000
--- a/src/qc8-igemm/gen/1x2-minmax-fp32-scalar-lrint.c
+++ /dev/null
@@ -1,114 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-igemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-#include <math.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint(
- size_t mr,
- size_t nc,
- size_t kc,
- size_t ks,
- const int8_t**restrict a,
- const void*restrict w,
- int8_t*restrict c,
- size_t cm_stride,
- size_t cn_stride,
- size_t a_offset,
- const int8_t* zero,
- const union xnn_qs8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 1);
- assert(nc != 0);
- assert(kc != 0);
- assert(ks != 0);
- assert(ks % (1 * sizeof(void*)) == 0);
- assert(a != NULL);
- assert(w != NULL);
- assert(c != NULL);
-
- int8_t* c0 = c;
-
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- w = (const void*) ((const int32_t*) w + 2);
-
- size_t p = ks;
- do {
- const int8_t* restrict a0 = a[0];
- assert(a0 != NULL);
- if XNN_UNPREDICTABLE(a0 != zero) {
- a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
- }
- a += 1;
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) *a0++;
-
- const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
- const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
- w = (const void*) ((const int8_t*) w + 2);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
-
- k -= sizeof(int8_t);
- } while (k != 0);
- p -= 1 * sizeof(void*);
- } while (p != 0);
-
- float vfpacc0x0 = (float) vacc0x0;
- float vfpacc0x1 = (float) vacc0x1;
-
- typedef XNN_UNALIGNED float unaligned_float;
- const float vscale0 = ((const unaligned_float*) w)[0];
- vfpacc0x0 *= vscale0;
- const float vscale1 = ((const unaligned_float*) w)[1];
- vfpacc0x1 *= vscale1;
- w = (const void*) ((const float*) w + 2);
-
- long vrndacc0x0 = lrintf(vfpacc0x0);
- long vrndacc0x1 = lrintf(vfpacc0x1);
-
- const long voutput_min_less_zero_point = params->scalar_lrint.output_min_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x1;
-
- const long voutput_max_less_zero_point = params->scalar_lrint.output_max_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x1;
-
- const int32_t voutput_zero_point = params->scalar_lrint.output_zero_point;
- int32_t vout0x0 = (int32_t) vrndacc0x0 + voutput_zero_point;
- int32_t vout0x1 = (int32_t) vrndacc0x1 + voutput_zero_point;
-
- if XNN_LIKELY(nc >= 2) {
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
-
- c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
-
- a = (const int8_t**restrict) ((uintptr_t) a - ks);
- nc -= 2;
- } else {
- if (nc & 1) {
- c0[0] = (int8_t) vout0x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qc8-igemm/gen/1x2-minmax-fp32-scalar-magic.c b/src/qc8-igemm/gen/1x2-minmax-fp32-scalar-lrintf.c
similarity index 78%
copy from src/qc8-igemm/gen/1x2-minmax-fp32-scalar-magic.c
copy to src/qc8-igemm/gen/1x2-minmax-fp32-scalar-lrintf.c
index b10decc..db3964d 100644
--- a/src/qc8-igemm/gen/1x2-minmax-fp32-scalar-magic.c
+++ b/src/qc8-igemm/gen/1x2-minmax-fp32-scalar-lrintf.c
@@ -8,14 +8,13 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-
-#include <fp16.h>
+#include <math.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
-void xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_magic(
+void xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
@@ -81,21 +80,20 @@
vfpacc0x1 *= vscale1;
w = (const void*) ((const float*) w + 2);
- const float voutput_min_less_zero_point = params->scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->scalar_lrintf.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->scalar_lrintf.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
- const float vmagic_bias = params->scalar_magic.magic_bias;
- vfpacc0x0 += vmagic_bias;
- vfpacc0x1 += vmagic_bias;
+ const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
+ const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
- const int32_t vmagic_bias_less_output_zero_point = params->scalar_magic.magic_bias_less_output_zero_point;
- int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
- int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
+ const int32_t voutput_zero_point = params->scalar_lrintf.output_zero_point;
+ int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
+ int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
if XNN_LIKELY(nc >= 2) {
c0[0] = (int8_t) vout0x0;
diff --git a/src/qc8-igemm/gen/1x4-minmax-fp32-scalar-magic.c b/src/qc8-igemm/gen/1x4-minmax-fp32-scalar-fmagic.c
similarity index 91%
rename from src/qc8-igemm/gen/1x4-minmax-fp32-scalar-magic.c
rename to src/qc8-igemm/gen/1x4-minmax-fp32-scalar-fmagic.c
index f738ddb..70a42b2 100644
--- a/src/qc8-igemm/gen/1x4-minmax-fp32-scalar-magic.c
+++ b/src/qc8-igemm/gen/1x4-minmax-fp32-scalar-fmagic.c
@@ -15,7 +15,7 @@
#include <xnnpack/gemm.h>
-void xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_magic(
+void xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic(
size_t mr,
size_t nc,
size_t kc,
@@ -92,25 +92,25 @@
vfpacc0x3 *= vscale3;
w = (const void*) ((const float*) w + 4);
- const float voutput_min_less_zero_point = params->scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
vfpacc0x3 = math_max_f32(vfpacc0x3, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
vfpacc0x3 = math_min_f32(vfpacc0x3, voutput_max_less_zero_point);
- const float vmagic_bias = params->scalar_magic.magic_bias;
+ const float vmagic_bias = params->scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc0x2 += vmagic_bias;
vfpacc0x3 += vmagic_bias;
- const int32_t vmagic_bias_less_output_zero_point = params->scalar_magic.magic_bias_less_output_zero_point;
+ const int32_t vmagic_bias_less_output_zero_point = params->scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
int32_t vout0x2 = (int32_t) fp32_to_bits(vfpacc0x2) - vmagic_bias_less_output_zero_point;
diff --git a/src/qc8-igemm/gen/1x4-minmax-fp32-scalar-lrint.c b/src/qc8-igemm/gen/1x4-minmax-fp32-scalar-lrint.c
deleted file mode 100644
index a632b3e..0000000
--- a/src/qc8-igemm/gen/1x4-minmax-fp32-scalar-lrint.c
+++ /dev/null
@@ -1,141 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-igemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-#include <math.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint(
- size_t mr,
- size_t nc,
- size_t kc,
- size_t ks,
- const int8_t**restrict a,
- const void*restrict w,
- int8_t*restrict c,
- size_t cm_stride,
- size_t cn_stride,
- size_t a_offset,
- const int8_t* zero,
- const union xnn_qs8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 1);
- assert(nc != 0);
- assert(kc != 0);
- assert(ks != 0);
- assert(ks % (1 * sizeof(void*)) == 0);
- assert(a != NULL);
- assert(w != NULL);
- assert(c != NULL);
-
- int8_t* c0 = c;
-
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- int32_t vacc0x2 = ((const int32_t*) w)[2];
- int32_t vacc0x3 = ((const int32_t*) w)[3];
- w = (const void*) ((const int32_t*) w + 4);
-
- size_t p = ks;
- do {
- const int8_t* restrict a0 = a[0];
- assert(a0 != NULL);
- if XNN_UNPREDICTABLE(a0 != zero) {
- a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
- }
- a += 1;
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) *a0++;
-
- const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
- const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
- const int32_t vb2 = (int32_t) ((const int8_t*) w)[2];
- const int32_t vb3 = (int32_t) ((const int8_t*) w)[3];
- w = (const void*) ((const int8_t*) w + 4);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
- vacc0x2 += va0 * vb2;
- vacc0x3 += va0 * vb3;
-
- k -= sizeof(int8_t);
- } while (k != 0);
- p -= 1 * sizeof(void*);
- } while (p != 0);
-
- float vfpacc0x0 = (float) vacc0x0;
- float vfpacc0x1 = (float) vacc0x1;
- float vfpacc0x2 = (float) vacc0x2;
- float vfpacc0x3 = (float) vacc0x3;
-
- const float vscale0 = ((const float*) w)[0];
- vfpacc0x0 *= vscale0;
- const float vscale1 = ((const float*) w)[1];
- vfpacc0x1 *= vscale1;
- const float vscale2 = ((const float*) w)[2];
- vfpacc0x2 *= vscale2;
- const float vscale3 = ((const float*) w)[3];
- vfpacc0x3 *= vscale3;
- w = (const void*) ((const float*) w + 4);
-
- long vrndacc0x0 = lrintf(vfpacc0x0);
- long vrndacc0x1 = lrintf(vfpacc0x1);
- long vrndacc0x2 = lrintf(vfpacc0x2);
- long vrndacc0x3 = lrintf(vfpacc0x3);
-
- const long voutput_min_less_zero_point = params->scalar_lrint.output_min_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x1;
- vrndacc0x2 = XNN_UNPREDICTABLE(vrndacc0x2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x2;
- vrndacc0x3 = XNN_UNPREDICTABLE(vrndacc0x3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x3;
-
- const long voutput_max_less_zero_point = params->scalar_lrint.output_max_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x1;
- vrndacc0x2 = XNN_UNPREDICTABLE(vrndacc0x2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x2;
- vrndacc0x3 = XNN_UNPREDICTABLE(vrndacc0x3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x3;
-
- const int32_t voutput_zero_point = params->scalar_lrint.output_zero_point;
- int32_t vout0x0 = (int32_t) vrndacc0x0 + voutput_zero_point;
- int32_t vout0x1 = (int32_t) vrndacc0x1 + voutput_zero_point;
- int32_t vout0x2 = (int32_t) vrndacc0x2 + voutput_zero_point;
- int32_t vout0x3 = (int32_t) vrndacc0x3 + voutput_zero_point;
-
- if XNN_LIKELY(nc >= 4) {
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
- c0[2] = (int8_t) vout0x2;
- c0[3] = (int8_t) vout0x3;
-
- c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
-
- a = (const int8_t**restrict) ((uintptr_t) a - ks);
- nc -= 4;
- } else {
- if (nc & 2) {
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
- vout0x0 = vout0x2;
- c0 += 2;
- }
- if (nc & 1) {
- c0[0] = (int8_t) vout0x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qc8-igemm/gen/1x4-minmax-fp32-scalar-magic.c b/src/qc8-igemm/gen/1x4-minmax-fp32-scalar-lrintf.c
similarity index 78%
copy from src/qc8-igemm/gen/1x4-minmax-fp32-scalar-magic.c
copy to src/qc8-igemm/gen/1x4-minmax-fp32-scalar-lrintf.c
index f738ddb..614f571 100644
--- a/src/qc8-igemm/gen/1x4-minmax-fp32-scalar-magic.c
+++ b/src/qc8-igemm/gen/1x4-minmax-fp32-scalar-lrintf.c
@@ -8,14 +8,13 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-
-#include <fp16.h>
+#include <math.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
-void xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_magic(
+void xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
@@ -92,29 +91,28 @@
vfpacc0x3 *= vscale3;
w = (const void*) ((const float*) w + 4);
- const float voutput_min_less_zero_point = params->scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->scalar_lrintf.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
vfpacc0x3 = math_max_f32(vfpacc0x3, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->scalar_lrintf.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
vfpacc0x3 = math_min_f32(vfpacc0x3, voutput_max_less_zero_point);
- const float vmagic_bias = params->scalar_magic.magic_bias;
- vfpacc0x0 += vmagic_bias;
- vfpacc0x1 += vmagic_bias;
- vfpacc0x2 += vmagic_bias;
- vfpacc0x3 += vmagic_bias;
+ const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
+ const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
+ const int32_t vrndacc0x2 = (int32_t) lrintf(vfpacc0x2);
+ const int32_t vrndacc0x3 = (int32_t) lrintf(vfpacc0x3);
- const int32_t vmagic_bias_less_output_zero_point = params->scalar_magic.magic_bias_less_output_zero_point;
- int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
- int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
- int32_t vout0x2 = (int32_t) fp32_to_bits(vfpacc0x2) - vmagic_bias_less_output_zero_point;
- int32_t vout0x3 = (int32_t) fp32_to_bits(vfpacc0x3) - vmagic_bias_less_output_zero_point;
+ const int32_t voutput_zero_point = params->scalar_lrintf.output_zero_point;
+ int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
+ int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
+ int32_t vout0x2 = vrndacc0x2 + voutput_zero_point;
+ int32_t vout0x3 = vrndacc0x3 + voutput_zero_point;
if XNN_LIKELY(nc >= 4) {
c0[0] = (int8_t) vout0x0;
diff --git a/src/qc8-igemm/gen/2x2-minmax-fp32-scalar-magic.c b/src/qc8-igemm/gen/2x2-minmax-fp32-scalar-fmagic.c
similarity index 91%
rename from src/qc8-igemm/gen/2x2-minmax-fp32-scalar-magic.c
rename to src/qc8-igemm/gen/2x2-minmax-fp32-scalar-fmagic.c
index fc24542..0224989 100644
--- a/src/qc8-igemm/gen/2x2-minmax-fp32-scalar-magic.c
+++ b/src/qc8-igemm/gen/2x2-minmax-fp32-scalar-fmagic.c
@@ -15,7 +15,7 @@
#include <xnnpack/gemm.h>
-void xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_magic(
+void xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic(
size_t mr,
size_t nc,
size_t kc,
@@ -99,25 +99,25 @@
vfpacc1x1 *= vscale1;
w = (const void*) ((const float*) w + 2);
- const float voutput_min_less_zero_point = params->scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point);
vfpacc1x1 = math_max_f32(vfpacc1x1, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point);
vfpacc1x1 = math_min_f32(vfpacc1x1, voutput_max_less_zero_point);
- const float vmagic_bias = params->scalar_magic.magic_bias;
+ const float vmagic_bias = params->scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc1x0 += vmagic_bias;
vfpacc1x1 += vmagic_bias;
- const int32_t vmagic_bias_less_output_zero_point = params->scalar_magic.magic_bias_less_output_zero_point;
+ const int32_t vmagic_bias_less_output_zero_point = params->scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
int32_t vout1x0 = (int32_t) fp32_to_bits(vfpacc1x0) - vmagic_bias_less_output_zero_point;
diff --git a/src/qc8-igemm/gen/2x2-minmax-fp32-scalar-lrint.c b/src/qc8-igemm/gen/2x2-minmax-fp32-scalar-lrint.c
deleted file mode 100644
index 9209ecf..0000000
--- a/src/qc8-igemm/gen/2x2-minmax-fp32-scalar-lrint.c
+++ /dev/null
@@ -1,144 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-igemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-#include <math.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint(
- size_t mr,
- size_t nc,
- size_t kc,
- size_t ks,
- const int8_t**restrict a,
- const void*restrict w,
- int8_t*restrict c,
- size_t cm_stride,
- size_t cn_stride,
- size_t a_offset,
- const int8_t* zero,
- const union xnn_qs8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 2);
- assert(nc != 0);
- assert(kc != 0);
- assert(ks != 0);
- assert(ks % (2 * sizeof(void*)) == 0);
- assert(a != NULL);
- assert(w != NULL);
- assert(c != NULL);
-
- int8_t* c0 = c;
- int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
- if XNN_UNPREDICTABLE(mr != 2) {
- c1 = c0;
- }
-
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- int32_t vacc1x0 = vacc0x0;
- int32_t vacc1x1 = vacc0x1;
- w = (const void*) ((const int32_t*) w + 2);
-
- size_t p = ks;
- do {
- const int8_t* restrict a0 = a[0];
- assert(a0 != NULL);
- if XNN_UNPREDICTABLE(a0 != zero) {
- a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
- }
- const int8_t* restrict a1 = a[1];
- assert(a1 != NULL);
- if XNN_UNPREDICTABLE(a1 != zero) {
- a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
- }
- a += 2;
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) *a0++;
- const int32_t va1 = (int32_t) *a1++;
-
- const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
- const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
- w = (const void*) ((const int8_t*) w + 2);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
- vacc1x0 += va1 * vb0;
- vacc1x1 += va1 * vb1;
-
- k -= sizeof(int8_t);
- } while (k != 0);
- p -= 2 * sizeof(void*);
- } while (p != 0);
-
- float vfpacc0x0 = (float) vacc0x0;
- float vfpacc0x1 = (float) vacc0x1;
- float vfpacc1x0 = (float) vacc1x0;
- float vfpacc1x1 = (float) vacc1x1;
-
- typedef XNN_UNALIGNED float unaligned_float;
- const float vscale0 = ((const unaligned_float*) w)[0];
- vfpacc0x0 *= vscale0;
- vfpacc1x0 *= vscale0;
- const float vscale1 = ((const unaligned_float*) w)[1];
- vfpacc0x1 *= vscale1;
- vfpacc1x1 *= vscale1;
- w = (const void*) ((const float*) w + 2);
-
- long vrndacc0x0 = lrintf(vfpacc0x0);
- long vrndacc0x1 = lrintf(vfpacc0x1);
- long vrndacc1x0 = lrintf(vfpacc1x0);
- long vrndacc1x1 = lrintf(vfpacc1x1);
-
- const long voutput_min_less_zero_point = params->scalar_lrint.output_min_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x1;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x1;
-
- const long voutput_max_less_zero_point = params->scalar_lrint.output_max_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x1;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x1;
-
- const int32_t voutput_zero_point = params->scalar_lrint.output_zero_point;
- int32_t vout0x0 = (int32_t) vrndacc0x0 + voutput_zero_point;
- int32_t vout0x1 = (int32_t) vrndacc0x1 + voutput_zero_point;
- int32_t vout1x0 = (int32_t) vrndacc1x0 + voutput_zero_point;
- int32_t vout1x1 = (int32_t) vrndacc1x1 + voutput_zero_point;
-
- if XNN_LIKELY(nc >= 2) {
- c1[0] = (int8_t) vout1x0;
- c1[1] = (int8_t) vout1x1;
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
-
- c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
- c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
-
- a = (const int8_t**restrict) ((uintptr_t) a - ks);
- nc -= 2;
- } else {
- if (nc & 1) {
- c1[0] = (int8_t) vout1x0;
- c0[0] = (int8_t) vout0x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qc8-igemm/gen/2x2-minmax-fp32-scalar-magic.c b/src/qc8-igemm/gen/2x2-minmax-fp32-scalar-lrintf.c
similarity index 79%
copy from src/qc8-igemm/gen/2x2-minmax-fp32-scalar-magic.c
copy to src/qc8-igemm/gen/2x2-minmax-fp32-scalar-lrintf.c
index fc24542..ced575c 100644
--- a/src/qc8-igemm/gen/2x2-minmax-fp32-scalar-magic.c
+++ b/src/qc8-igemm/gen/2x2-minmax-fp32-scalar-lrintf.c
@@ -8,14 +8,13 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-
-#include <fp16.h>
+#include <math.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
-void xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_magic(
+void xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
@@ -99,29 +98,28 @@
vfpacc1x1 *= vscale1;
w = (const void*) ((const float*) w + 2);
- const float voutput_min_less_zero_point = params->scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->scalar_lrintf.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point);
vfpacc1x1 = math_max_f32(vfpacc1x1, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->scalar_lrintf.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point);
vfpacc1x1 = math_min_f32(vfpacc1x1, voutput_max_less_zero_point);
- const float vmagic_bias = params->scalar_magic.magic_bias;
- vfpacc0x0 += vmagic_bias;
- vfpacc0x1 += vmagic_bias;
- vfpacc1x0 += vmagic_bias;
- vfpacc1x1 += vmagic_bias;
+ const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
+ const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
+ const int32_t vrndacc1x0 = (int32_t) lrintf(vfpacc1x0);
+ const int32_t vrndacc1x1 = (int32_t) lrintf(vfpacc1x1);
- const int32_t vmagic_bias_less_output_zero_point = params->scalar_magic.magic_bias_less_output_zero_point;
- int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
- int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
- int32_t vout1x0 = (int32_t) fp32_to_bits(vfpacc1x0) - vmagic_bias_less_output_zero_point;
- int32_t vout1x1 = (int32_t) fp32_to_bits(vfpacc1x1) - vmagic_bias_less_output_zero_point;
+ const int32_t voutput_zero_point = params->scalar_lrintf.output_zero_point;
+ int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
+ int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
+ int32_t vout1x0 = vrndacc1x0 + voutput_zero_point;
+ int32_t vout1x1 = vrndacc1x1 + voutput_zero_point;
if XNN_LIKELY(nc >= 2) {
c1[0] = (int8_t) vout1x0;
diff --git a/src/qc8-igemm/gen/2x4-minmax-fp32-scalar-magic.c b/src/qc8-igemm/gen/2x4-minmax-fp32-scalar-fmagic.c
similarity index 94%
rename from src/qc8-igemm/gen/2x4-minmax-fp32-scalar-magic.c
rename to src/qc8-igemm/gen/2x4-minmax-fp32-scalar-fmagic.c
index 77db1b2..ed91c06 100644
--- a/src/qc8-igemm/gen/2x4-minmax-fp32-scalar-magic.c
+++ b/src/qc8-igemm/gen/2x4-minmax-fp32-scalar-fmagic.c
@@ -15,7 +15,7 @@
#include <xnnpack/gemm.h>
-void xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_magic(
+void xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic(
size_t mr,
size_t nc,
size_t kc,
@@ -118,7 +118,7 @@
vfpacc1x3 *= vscale3;
w = (const void*) ((const float*) w + 4);
- const float voutput_min_less_zero_point = params->scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
@@ -128,7 +128,7 @@
vfpacc1x2 = math_max_f32(vfpacc1x2, voutput_min_less_zero_point);
vfpacc1x3 = math_max_f32(vfpacc1x3, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
@@ -138,7 +138,7 @@
vfpacc1x2 = math_min_f32(vfpacc1x2, voutput_max_less_zero_point);
vfpacc1x3 = math_min_f32(vfpacc1x3, voutput_max_less_zero_point);
- const float vmagic_bias = params->scalar_magic.magic_bias;
+ const float vmagic_bias = params->scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc0x2 += vmagic_bias;
@@ -148,7 +148,7 @@
vfpacc1x2 += vmagic_bias;
vfpacc1x3 += vmagic_bias;
- const int32_t vmagic_bias_less_output_zero_point = params->scalar_magic.magic_bias_less_output_zero_point;
+ const int32_t vmagic_bias_less_output_zero_point = params->scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
int32_t vout0x2 = (int32_t) fp32_to_bits(vfpacc0x2) - vmagic_bias_less_output_zero_point;
diff --git a/src/qc8-igemm/gen/2x4-minmax-fp32-scalar-lrint.c b/src/qc8-igemm/gen/2x4-minmax-fp32-scalar-lrint.c
deleted file mode 100644
index 50b5883..0000000
--- a/src/qc8-igemm/gen/2x4-minmax-fp32-scalar-lrint.c
+++ /dev/null
@@ -1,193 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-igemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-#include <math.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint(
- size_t mr,
- size_t nc,
- size_t kc,
- size_t ks,
- const int8_t**restrict a,
- const void*restrict w,
- int8_t*restrict c,
- size_t cm_stride,
- size_t cn_stride,
- size_t a_offset,
- const int8_t* zero,
- const union xnn_qs8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 2);
- assert(nc != 0);
- assert(kc != 0);
- assert(ks != 0);
- assert(ks % (2 * sizeof(void*)) == 0);
- assert(a != NULL);
- assert(w != NULL);
- assert(c != NULL);
-
- int8_t* c0 = c;
- int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
- if XNN_UNPREDICTABLE(mr != 2) {
- c1 = c0;
- }
-
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- int32_t vacc0x2 = ((const int32_t*) w)[2];
- int32_t vacc0x3 = ((const int32_t*) w)[3];
- int32_t vacc1x0 = vacc0x0;
- int32_t vacc1x1 = vacc0x1;
- int32_t vacc1x2 = vacc0x2;
- int32_t vacc1x3 = vacc0x3;
- w = (const void*) ((const int32_t*) w + 4);
-
- size_t p = ks;
- do {
- const int8_t* restrict a0 = a[0];
- assert(a0 != NULL);
- if XNN_UNPREDICTABLE(a0 != zero) {
- a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
- }
- const int8_t* restrict a1 = a[1];
- assert(a1 != NULL);
- if XNN_UNPREDICTABLE(a1 != zero) {
- a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
- }
- a += 2;
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) *a0++;
- const int32_t va1 = (int32_t) *a1++;
-
- const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
- const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
- const int32_t vb2 = (int32_t) ((const int8_t*) w)[2];
- const int32_t vb3 = (int32_t) ((const int8_t*) w)[3];
- w = (const void*) ((const int8_t*) w + 4);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
- vacc0x2 += va0 * vb2;
- vacc0x3 += va0 * vb3;
- vacc1x0 += va1 * vb0;
- vacc1x1 += va1 * vb1;
- vacc1x2 += va1 * vb2;
- vacc1x3 += va1 * vb3;
-
- k -= sizeof(int8_t);
- } while (k != 0);
- p -= 2 * sizeof(void*);
- } while (p != 0);
-
- float vfpacc0x0 = (float) vacc0x0;
- float vfpacc0x1 = (float) vacc0x1;
- float vfpacc0x2 = (float) vacc0x2;
- float vfpacc0x3 = (float) vacc0x3;
- float vfpacc1x0 = (float) vacc1x0;
- float vfpacc1x1 = (float) vacc1x1;
- float vfpacc1x2 = (float) vacc1x2;
- float vfpacc1x3 = (float) vacc1x3;
-
- const float vscale0 = ((const float*) w)[0];
- vfpacc0x0 *= vscale0;
- vfpacc1x0 *= vscale0;
- const float vscale1 = ((const float*) w)[1];
- vfpacc0x1 *= vscale1;
- vfpacc1x1 *= vscale1;
- const float vscale2 = ((const float*) w)[2];
- vfpacc0x2 *= vscale2;
- vfpacc1x2 *= vscale2;
- const float vscale3 = ((const float*) w)[3];
- vfpacc0x3 *= vscale3;
- vfpacc1x3 *= vscale3;
- w = (const void*) ((const float*) w + 4);
-
- long vrndacc0x0 = lrintf(vfpacc0x0);
- long vrndacc0x1 = lrintf(vfpacc0x1);
- long vrndacc0x2 = lrintf(vfpacc0x2);
- long vrndacc0x3 = lrintf(vfpacc0x3);
- long vrndacc1x0 = lrintf(vfpacc1x0);
- long vrndacc1x1 = lrintf(vfpacc1x1);
- long vrndacc1x2 = lrintf(vfpacc1x2);
- long vrndacc1x3 = lrintf(vfpacc1x3);
-
- const long voutput_min_less_zero_point = params->scalar_lrint.output_min_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x1;
- vrndacc0x2 = XNN_UNPREDICTABLE(vrndacc0x2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x2;
- vrndacc0x3 = XNN_UNPREDICTABLE(vrndacc0x3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x3;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x1;
- vrndacc1x2 = XNN_UNPREDICTABLE(vrndacc1x2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x2;
- vrndacc1x3 = XNN_UNPREDICTABLE(vrndacc1x3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x3;
-
- const long voutput_max_less_zero_point = params->scalar_lrint.output_max_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x1;
- vrndacc0x2 = XNN_UNPREDICTABLE(vrndacc0x2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x2;
- vrndacc0x3 = XNN_UNPREDICTABLE(vrndacc0x3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x3;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x1;
- vrndacc1x2 = XNN_UNPREDICTABLE(vrndacc1x2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x2;
- vrndacc1x3 = XNN_UNPREDICTABLE(vrndacc1x3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x3;
-
- const int32_t voutput_zero_point = params->scalar_lrint.output_zero_point;
- int32_t vout0x0 = (int32_t) vrndacc0x0 + voutput_zero_point;
- int32_t vout0x1 = (int32_t) vrndacc0x1 + voutput_zero_point;
- int32_t vout0x2 = (int32_t) vrndacc0x2 + voutput_zero_point;
- int32_t vout0x3 = (int32_t) vrndacc0x3 + voutput_zero_point;
- int32_t vout1x0 = (int32_t) vrndacc1x0 + voutput_zero_point;
- int32_t vout1x1 = (int32_t) vrndacc1x1 + voutput_zero_point;
- int32_t vout1x2 = (int32_t) vrndacc1x2 + voutput_zero_point;
- int32_t vout1x3 = (int32_t) vrndacc1x3 + voutput_zero_point;
-
- if XNN_LIKELY(nc >= 4) {
- c1[0] = (int8_t) vout1x0;
- c1[1] = (int8_t) vout1x1;
- c1[2] = (int8_t) vout1x2;
- c1[3] = (int8_t) vout1x3;
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
- c0[2] = (int8_t) vout0x2;
- c0[3] = (int8_t) vout0x3;
-
- c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
- c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
-
- a = (const int8_t**restrict) ((uintptr_t) a - ks);
- nc -= 4;
- } else {
- if (nc & 2) {
- c1[0] = (int8_t) vout1x0;
- c1[1] = (int8_t) vout1x1;
- vout1x0 = vout1x2;
- c1 += 2;
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
- vout0x0 = vout0x2;
- c0 += 2;
- }
- if (nc & 1) {
- c1[0] = (int8_t) vout1x0;
- c0[0] = (int8_t) vout0x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qc8-igemm/gen/2x4-minmax-fp32-scalar-magic.c b/src/qc8-igemm/gen/2x4-minmax-fp32-scalar-lrintf.c
similarity index 78%
copy from src/qc8-igemm/gen/2x4-minmax-fp32-scalar-magic.c
copy to src/qc8-igemm/gen/2x4-minmax-fp32-scalar-lrintf.c
index 77db1b2..f275eab 100644
--- a/src/qc8-igemm/gen/2x4-minmax-fp32-scalar-magic.c
+++ b/src/qc8-igemm/gen/2x4-minmax-fp32-scalar-lrintf.c
@@ -8,14 +8,13 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-
-#include <fp16.h>
+#include <math.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
-void xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_magic(
+void xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
@@ -118,7 +117,7 @@
vfpacc1x3 *= vscale3;
w = (const void*) ((const float*) w + 4);
- const float voutput_min_less_zero_point = params->scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->scalar_lrintf.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
@@ -128,7 +127,7 @@
vfpacc1x2 = math_max_f32(vfpacc1x2, voutput_min_less_zero_point);
vfpacc1x3 = math_max_f32(vfpacc1x3, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->scalar_lrintf.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
@@ -138,25 +137,24 @@
vfpacc1x2 = math_min_f32(vfpacc1x2, voutput_max_less_zero_point);
vfpacc1x3 = math_min_f32(vfpacc1x3, voutput_max_less_zero_point);
- const float vmagic_bias = params->scalar_magic.magic_bias;
- vfpacc0x0 += vmagic_bias;
- vfpacc0x1 += vmagic_bias;
- vfpacc0x2 += vmagic_bias;
- vfpacc0x3 += vmagic_bias;
- vfpacc1x0 += vmagic_bias;
- vfpacc1x1 += vmagic_bias;
- vfpacc1x2 += vmagic_bias;
- vfpacc1x3 += vmagic_bias;
+ const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
+ const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
+ const int32_t vrndacc0x2 = (int32_t) lrintf(vfpacc0x2);
+ const int32_t vrndacc0x3 = (int32_t) lrintf(vfpacc0x3);
+ const int32_t vrndacc1x0 = (int32_t) lrintf(vfpacc1x0);
+ const int32_t vrndacc1x1 = (int32_t) lrintf(vfpacc1x1);
+ const int32_t vrndacc1x2 = (int32_t) lrintf(vfpacc1x2);
+ const int32_t vrndacc1x3 = (int32_t) lrintf(vfpacc1x3);
- const int32_t vmagic_bias_less_output_zero_point = params->scalar_magic.magic_bias_less_output_zero_point;
- int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
- int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
- int32_t vout0x2 = (int32_t) fp32_to_bits(vfpacc0x2) - vmagic_bias_less_output_zero_point;
- int32_t vout0x3 = (int32_t) fp32_to_bits(vfpacc0x3) - vmagic_bias_less_output_zero_point;
- int32_t vout1x0 = (int32_t) fp32_to_bits(vfpacc1x0) - vmagic_bias_less_output_zero_point;
- int32_t vout1x1 = (int32_t) fp32_to_bits(vfpacc1x1) - vmagic_bias_less_output_zero_point;
- int32_t vout1x2 = (int32_t) fp32_to_bits(vfpacc1x2) - vmagic_bias_less_output_zero_point;
- int32_t vout1x3 = (int32_t) fp32_to_bits(vfpacc1x3) - vmagic_bias_less_output_zero_point;
+ const int32_t voutput_zero_point = params->scalar_lrintf.output_zero_point;
+ int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
+ int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
+ int32_t vout0x2 = vrndacc0x2 + voutput_zero_point;
+ int32_t vout0x3 = vrndacc0x3 + voutput_zero_point;
+ int32_t vout1x0 = vrndacc1x0 + voutput_zero_point;
+ int32_t vout1x1 = vrndacc1x1 + voutput_zero_point;
+ int32_t vout1x2 = vrndacc1x2 + voutput_zero_point;
+ int32_t vout1x3 = vrndacc1x3 + voutput_zero_point;
if XNN_LIKELY(nc >= 4) {
c1[0] = (int8_t) vout1x0;
diff --git a/src/qc8-igemm/gen/3x2-minmax-fp32-scalar-magic.c b/src/qc8-igemm/gen/3x2-minmax-fp32-scalar-fmagic.c
similarity index 93%
rename from src/qc8-igemm/gen/3x2-minmax-fp32-scalar-magic.c
rename to src/qc8-igemm/gen/3x2-minmax-fp32-scalar-fmagic.c
index 4c1293f..3fe83e3 100644
--- a/src/qc8-igemm/gen/3x2-minmax-fp32-scalar-magic.c
+++ b/src/qc8-igemm/gen/3x2-minmax-fp32-scalar-fmagic.c
@@ -15,7 +15,7 @@
#include <xnnpack/gemm.h>
-void xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_magic(
+void xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic(
size_t mr,
size_t nc,
size_t kc,
@@ -117,7 +117,7 @@
vfpacc2x1 *= vscale1;
w = (const void*) ((const float*) w + 2);
- const float voutput_min_less_zero_point = params->scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point);
@@ -125,7 +125,7 @@
vfpacc2x0 = math_max_f32(vfpacc2x0, voutput_min_less_zero_point);
vfpacc2x1 = math_max_f32(vfpacc2x1, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point);
@@ -133,7 +133,7 @@
vfpacc2x0 = math_min_f32(vfpacc2x0, voutput_max_less_zero_point);
vfpacc2x1 = math_min_f32(vfpacc2x1, voutput_max_less_zero_point);
- const float vmagic_bias = params->scalar_magic.magic_bias;
+ const float vmagic_bias = params->scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc1x0 += vmagic_bias;
@@ -141,7 +141,7 @@
vfpacc2x0 += vmagic_bias;
vfpacc2x1 += vmagic_bias;
- const int32_t vmagic_bias_less_output_zero_point = params->scalar_magic.magic_bias_less_output_zero_point;
+ const int32_t vmagic_bias_less_output_zero_point = params->scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
int32_t vout1x0 = (int32_t) fp32_to_bits(vfpacc1x0) - vmagic_bias_less_output_zero_point;
diff --git a/src/qc8-igemm/gen/3x2-minmax-fp32-scalar-lrint.c b/src/qc8-igemm/gen/3x2-minmax-fp32-scalar-lrint.c
deleted file mode 100644
index 595dbbb..0000000
--- a/src/qc8-igemm/gen/3x2-minmax-fp32-scalar-lrint.c
+++ /dev/null
@@ -1,174 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-igemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-#include <math.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint(
- size_t mr,
- size_t nc,
- size_t kc,
- size_t ks,
- const int8_t**restrict a,
- const void*restrict w,
- int8_t*restrict c,
- size_t cm_stride,
- size_t cn_stride,
- size_t a_offset,
- const int8_t* zero,
- const union xnn_qs8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 3);
- assert(nc != 0);
- assert(kc != 0);
- assert(ks != 0);
- assert(ks % (3 * sizeof(void*)) == 0);
- assert(a != NULL);
- assert(w != NULL);
- assert(c != NULL);
-
- int8_t* c0 = c;
- int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
- if XNN_UNPREDICTABLE(mr < 2) {
- c1 = c0;
- }
- int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
- if XNN_UNPREDICTABLE(mr <= 2) {
- c2 = c1;
- }
-
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- int32_t vacc1x0 = vacc0x0;
- int32_t vacc1x1 = vacc0x1;
- int32_t vacc2x0 = vacc0x0;
- int32_t vacc2x1 = vacc0x1;
- w = (const void*) ((const int32_t*) w + 2);
-
- size_t p = ks;
- do {
- const int8_t* restrict a0 = a[0];
- assert(a0 != NULL);
- if XNN_UNPREDICTABLE(a0 != zero) {
- a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
- }
- const int8_t* restrict a1 = a[1];
- assert(a1 != NULL);
- if XNN_UNPREDICTABLE(a1 != zero) {
- a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
- }
- const int8_t* restrict a2 = a[2];
- assert(a2 != NULL);
- if XNN_UNPREDICTABLE(a2 != zero) {
- a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
- }
- a += 3;
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) *a0++;
- const int32_t va1 = (int32_t) *a1++;
- const int32_t va2 = (int32_t) *a2++;
-
- const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
- const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
- w = (const void*) ((const int8_t*) w + 2);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
- vacc1x0 += va1 * vb0;
- vacc1x1 += va1 * vb1;
- vacc2x0 += va2 * vb0;
- vacc2x1 += va2 * vb1;
-
- k -= sizeof(int8_t);
- } while (k != 0);
- p -= 3 * sizeof(void*);
- } while (p != 0);
-
- float vfpacc0x0 = (float) vacc0x0;
- float vfpacc0x1 = (float) vacc0x1;
- float vfpacc1x0 = (float) vacc1x0;
- float vfpacc1x1 = (float) vacc1x1;
- float vfpacc2x0 = (float) vacc2x0;
- float vfpacc2x1 = (float) vacc2x1;
-
- typedef XNN_UNALIGNED float unaligned_float;
- const float vscale0 = ((const unaligned_float*) w)[0];
- vfpacc0x0 *= vscale0;
- vfpacc1x0 *= vscale0;
- vfpacc2x0 *= vscale0;
- const float vscale1 = ((const unaligned_float*) w)[1];
- vfpacc0x1 *= vscale1;
- vfpacc1x1 *= vscale1;
- vfpacc2x1 *= vscale1;
- w = (const void*) ((const float*) w + 2);
-
- long vrndacc0x0 = lrintf(vfpacc0x0);
- long vrndacc0x1 = lrintf(vfpacc0x1);
- long vrndacc1x0 = lrintf(vfpacc1x0);
- long vrndacc1x1 = lrintf(vfpacc1x1);
- long vrndacc2x0 = lrintf(vfpacc2x0);
- long vrndacc2x1 = lrintf(vfpacc2x1);
-
- const long voutput_min_less_zero_point = params->scalar_lrint.output_min_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x1;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x1;
- vrndacc2x0 = XNN_UNPREDICTABLE(vrndacc2x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x0;
- vrndacc2x1 = XNN_UNPREDICTABLE(vrndacc2x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x1;
-
- const long voutput_max_less_zero_point = params->scalar_lrint.output_max_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x1;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x1;
- vrndacc2x0 = XNN_UNPREDICTABLE(vrndacc2x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x0;
- vrndacc2x1 = XNN_UNPREDICTABLE(vrndacc2x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x1;
-
- const int32_t voutput_zero_point = params->scalar_lrint.output_zero_point;
- int32_t vout0x0 = (int32_t) vrndacc0x0 + voutput_zero_point;
- int32_t vout0x1 = (int32_t) vrndacc0x1 + voutput_zero_point;
- int32_t vout1x0 = (int32_t) vrndacc1x0 + voutput_zero_point;
- int32_t vout1x1 = (int32_t) vrndacc1x1 + voutput_zero_point;
- int32_t vout2x0 = (int32_t) vrndacc2x0 + voutput_zero_point;
- int32_t vout2x1 = (int32_t) vrndacc2x1 + voutput_zero_point;
-
- if XNN_LIKELY(nc >= 2) {
- c2[0] = (int8_t) vout2x0;
- c2[1] = (int8_t) vout2x1;
- c1[0] = (int8_t) vout1x0;
- c1[1] = (int8_t) vout1x1;
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
-
- c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
- c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
- c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
-
- a = (const int8_t**restrict) ((uintptr_t) a - ks);
- nc -= 2;
- } else {
- if (nc & 1) {
- c2[0] = (int8_t) vout2x0;
- c1[0] = (int8_t) vout1x0;
- c0[0] = (int8_t) vout0x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qc8-igemm/gen/3x2-minmax-fp32-scalar-magic.c b/src/qc8-igemm/gen/3x2-minmax-fp32-scalar-lrintf.c
similarity index 79%
copy from src/qc8-igemm/gen/3x2-minmax-fp32-scalar-magic.c
copy to src/qc8-igemm/gen/3x2-minmax-fp32-scalar-lrintf.c
index 4c1293f..03ece9e 100644
--- a/src/qc8-igemm/gen/3x2-minmax-fp32-scalar-magic.c
+++ b/src/qc8-igemm/gen/3x2-minmax-fp32-scalar-lrintf.c
@@ -8,14 +8,13 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-
-#include <fp16.h>
+#include <math.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
-void xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_magic(
+void xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
@@ -117,7 +116,7 @@
vfpacc2x1 *= vscale1;
w = (const void*) ((const float*) w + 2);
- const float voutput_min_less_zero_point = params->scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->scalar_lrintf.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point);
@@ -125,7 +124,7 @@
vfpacc2x0 = math_max_f32(vfpacc2x0, voutput_min_less_zero_point);
vfpacc2x1 = math_max_f32(vfpacc2x1, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->scalar_lrintf.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point);
@@ -133,21 +132,20 @@
vfpacc2x0 = math_min_f32(vfpacc2x0, voutput_max_less_zero_point);
vfpacc2x1 = math_min_f32(vfpacc2x1, voutput_max_less_zero_point);
- const float vmagic_bias = params->scalar_magic.magic_bias;
- vfpacc0x0 += vmagic_bias;
- vfpacc0x1 += vmagic_bias;
- vfpacc1x0 += vmagic_bias;
- vfpacc1x1 += vmagic_bias;
- vfpacc2x0 += vmagic_bias;
- vfpacc2x1 += vmagic_bias;
+ const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
+ const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
+ const int32_t vrndacc1x0 = (int32_t) lrintf(vfpacc1x0);
+ const int32_t vrndacc1x1 = (int32_t) lrintf(vfpacc1x1);
+ const int32_t vrndacc2x0 = (int32_t) lrintf(vfpacc2x0);
+ const int32_t vrndacc2x1 = (int32_t) lrintf(vfpacc2x1);
- const int32_t vmagic_bias_less_output_zero_point = params->scalar_magic.magic_bias_less_output_zero_point;
- int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
- int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
- int32_t vout1x0 = (int32_t) fp32_to_bits(vfpacc1x0) - vmagic_bias_less_output_zero_point;
- int32_t vout1x1 = (int32_t) fp32_to_bits(vfpacc1x1) - vmagic_bias_less_output_zero_point;
- int32_t vout2x0 = (int32_t) fp32_to_bits(vfpacc2x0) - vmagic_bias_less_output_zero_point;
- int32_t vout2x1 = (int32_t) fp32_to_bits(vfpacc2x1) - vmagic_bias_less_output_zero_point;
+ const int32_t voutput_zero_point = params->scalar_lrintf.output_zero_point;
+ int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
+ int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
+ int32_t vout1x0 = vrndacc1x0 + voutput_zero_point;
+ int32_t vout1x1 = vrndacc1x1 + voutput_zero_point;
+ int32_t vout2x0 = vrndacc2x0 + voutput_zero_point;
+ int32_t vout2x1 = vrndacc2x1 + voutput_zero_point;
if XNN_LIKELY(nc >= 2) {
c2[0] = (int8_t) vout2x0;
diff --git a/src/qc8-igemm/gen/3x4-minmax-fp32-scalar-magic.c b/src/qc8-igemm/gen/3x4-minmax-fp32-scalar-fmagic.c
similarity index 95%
rename from src/qc8-igemm/gen/3x4-minmax-fp32-scalar-magic.c
rename to src/qc8-igemm/gen/3x4-minmax-fp32-scalar-fmagic.c
index 208d5d9..167034f 100644
--- a/src/qc8-igemm/gen/3x4-minmax-fp32-scalar-magic.c
+++ b/src/qc8-igemm/gen/3x4-minmax-fp32-scalar-fmagic.c
@@ -15,7 +15,7 @@
#include <xnnpack/gemm.h>
-void xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_magic(
+void xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic(
size_t mr,
size_t nc,
size_t kc,
@@ -144,7 +144,7 @@
vfpacc2x3 *= vscale3;
w = (const void*) ((const float*) w + 4);
- const float voutput_min_less_zero_point = params->scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
@@ -158,7 +158,7 @@
vfpacc2x2 = math_max_f32(vfpacc2x2, voutput_min_less_zero_point);
vfpacc2x3 = math_max_f32(vfpacc2x3, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
@@ -172,7 +172,7 @@
vfpacc2x2 = math_min_f32(vfpacc2x2, voutput_max_less_zero_point);
vfpacc2x3 = math_min_f32(vfpacc2x3, voutput_max_less_zero_point);
- const float vmagic_bias = params->scalar_magic.magic_bias;
+ const float vmagic_bias = params->scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc0x2 += vmagic_bias;
@@ -186,7 +186,7 @@
vfpacc2x2 += vmagic_bias;
vfpacc2x3 += vmagic_bias;
- const int32_t vmagic_bias_less_output_zero_point = params->scalar_magic.magic_bias_less_output_zero_point;
+ const int32_t vmagic_bias_less_output_zero_point = params->scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
int32_t vout0x2 = (int32_t) fp32_to_bits(vfpacc0x2) - vmagic_bias_less_output_zero_point;
diff --git a/src/qc8-igemm/gen/3x4-minmax-fp32-scalar-lrint.c b/src/qc8-igemm/gen/3x4-minmax-fp32-scalar-lrint.c
deleted file mode 100644
index ace6de9..0000000
--- a/src/qc8-igemm/gen/3x4-minmax-fp32-scalar-lrint.c
+++ /dev/null
@@ -1,245 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-igemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-#include <math.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint(
- size_t mr,
- size_t nc,
- size_t kc,
- size_t ks,
- const int8_t**restrict a,
- const void*restrict w,
- int8_t*restrict c,
- size_t cm_stride,
- size_t cn_stride,
- size_t a_offset,
- const int8_t* zero,
- const union xnn_qs8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 3);
- assert(nc != 0);
- assert(kc != 0);
- assert(ks != 0);
- assert(ks % (3 * sizeof(void*)) == 0);
- assert(a != NULL);
- assert(w != NULL);
- assert(c != NULL);
-
- int8_t* c0 = c;
- int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
- if XNN_UNPREDICTABLE(mr < 2) {
- c1 = c0;
- }
- int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
- if XNN_UNPREDICTABLE(mr <= 2) {
- c2 = c1;
- }
-
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- int32_t vacc0x2 = ((const int32_t*) w)[2];
- int32_t vacc0x3 = ((const int32_t*) w)[3];
- int32_t vacc1x0 = vacc0x0;
- int32_t vacc1x1 = vacc0x1;
- int32_t vacc1x2 = vacc0x2;
- int32_t vacc1x3 = vacc0x3;
- int32_t vacc2x0 = vacc0x0;
- int32_t vacc2x1 = vacc0x1;
- int32_t vacc2x2 = vacc0x2;
- int32_t vacc2x3 = vacc0x3;
- w = (const void*) ((const int32_t*) w + 4);
-
- size_t p = ks;
- do {
- const int8_t* restrict a0 = a[0];
- assert(a0 != NULL);
- if XNN_UNPREDICTABLE(a0 != zero) {
- a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
- }
- const int8_t* restrict a1 = a[1];
- assert(a1 != NULL);
- if XNN_UNPREDICTABLE(a1 != zero) {
- a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
- }
- const int8_t* restrict a2 = a[2];
- assert(a2 != NULL);
- if XNN_UNPREDICTABLE(a2 != zero) {
- a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
- }
- a += 3;
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) *a0++;
- const int32_t va1 = (int32_t) *a1++;
- const int32_t va2 = (int32_t) *a2++;
-
- const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
- const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
- const int32_t vb2 = (int32_t) ((const int8_t*) w)[2];
- const int32_t vb3 = (int32_t) ((const int8_t*) w)[3];
- w = (const void*) ((const int8_t*) w + 4);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
- vacc0x2 += va0 * vb2;
- vacc0x3 += va0 * vb3;
- vacc1x0 += va1 * vb0;
- vacc1x1 += va1 * vb1;
- vacc1x2 += va1 * vb2;
- vacc1x3 += va1 * vb3;
- vacc2x0 += va2 * vb0;
- vacc2x1 += va2 * vb1;
- vacc2x2 += va2 * vb2;
- vacc2x3 += va2 * vb3;
-
- k -= sizeof(int8_t);
- } while (k != 0);
- p -= 3 * sizeof(void*);
- } while (p != 0);
-
- float vfpacc0x0 = (float) vacc0x0;
- float vfpacc0x1 = (float) vacc0x1;
- float vfpacc0x2 = (float) vacc0x2;
- float vfpacc0x3 = (float) vacc0x3;
- float vfpacc1x0 = (float) vacc1x0;
- float vfpacc1x1 = (float) vacc1x1;
- float vfpacc1x2 = (float) vacc1x2;
- float vfpacc1x3 = (float) vacc1x3;
- float vfpacc2x0 = (float) vacc2x0;
- float vfpacc2x1 = (float) vacc2x1;
- float vfpacc2x2 = (float) vacc2x2;
- float vfpacc2x3 = (float) vacc2x3;
-
- const float vscale0 = ((const float*) w)[0];
- vfpacc0x0 *= vscale0;
- vfpacc1x0 *= vscale0;
- vfpacc2x0 *= vscale0;
- const float vscale1 = ((const float*) w)[1];
- vfpacc0x1 *= vscale1;
- vfpacc1x1 *= vscale1;
- vfpacc2x1 *= vscale1;
- const float vscale2 = ((const float*) w)[2];
- vfpacc0x2 *= vscale2;
- vfpacc1x2 *= vscale2;
- vfpacc2x2 *= vscale2;
- const float vscale3 = ((const float*) w)[3];
- vfpacc0x3 *= vscale3;
- vfpacc1x3 *= vscale3;
- vfpacc2x3 *= vscale3;
- w = (const void*) ((const float*) w + 4);
-
- long vrndacc0x0 = lrintf(vfpacc0x0);
- long vrndacc0x1 = lrintf(vfpacc0x1);
- long vrndacc0x2 = lrintf(vfpacc0x2);
- long vrndacc0x3 = lrintf(vfpacc0x3);
- long vrndacc1x0 = lrintf(vfpacc1x0);
- long vrndacc1x1 = lrintf(vfpacc1x1);
- long vrndacc1x2 = lrintf(vfpacc1x2);
- long vrndacc1x3 = lrintf(vfpacc1x3);
- long vrndacc2x0 = lrintf(vfpacc2x0);
- long vrndacc2x1 = lrintf(vfpacc2x1);
- long vrndacc2x2 = lrintf(vfpacc2x2);
- long vrndacc2x3 = lrintf(vfpacc2x3);
-
- const long voutput_min_less_zero_point = params->scalar_lrint.output_min_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x1;
- vrndacc0x2 = XNN_UNPREDICTABLE(vrndacc0x2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x2;
- vrndacc0x3 = XNN_UNPREDICTABLE(vrndacc0x3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x3;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x1;
- vrndacc1x2 = XNN_UNPREDICTABLE(vrndacc1x2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x2;
- vrndacc1x3 = XNN_UNPREDICTABLE(vrndacc1x3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x3;
- vrndacc2x0 = XNN_UNPREDICTABLE(vrndacc2x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x0;
- vrndacc2x1 = XNN_UNPREDICTABLE(vrndacc2x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x1;
- vrndacc2x2 = XNN_UNPREDICTABLE(vrndacc2x2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x2;
- vrndacc2x3 = XNN_UNPREDICTABLE(vrndacc2x3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x3;
-
- const long voutput_max_less_zero_point = params->scalar_lrint.output_max_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x1;
- vrndacc0x2 = XNN_UNPREDICTABLE(vrndacc0x2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x2;
- vrndacc0x3 = XNN_UNPREDICTABLE(vrndacc0x3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x3;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x1;
- vrndacc1x2 = XNN_UNPREDICTABLE(vrndacc1x2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x2;
- vrndacc1x3 = XNN_UNPREDICTABLE(vrndacc1x3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x3;
- vrndacc2x0 = XNN_UNPREDICTABLE(vrndacc2x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x0;
- vrndacc2x1 = XNN_UNPREDICTABLE(vrndacc2x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x1;
- vrndacc2x2 = XNN_UNPREDICTABLE(vrndacc2x2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x2;
- vrndacc2x3 = XNN_UNPREDICTABLE(vrndacc2x3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x3;
-
- const int32_t voutput_zero_point = params->scalar_lrint.output_zero_point;
- int32_t vout0x0 = (int32_t) vrndacc0x0 + voutput_zero_point;
- int32_t vout0x1 = (int32_t) vrndacc0x1 + voutput_zero_point;
- int32_t vout0x2 = (int32_t) vrndacc0x2 + voutput_zero_point;
- int32_t vout0x3 = (int32_t) vrndacc0x3 + voutput_zero_point;
- int32_t vout1x0 = (int32_t) vrndacc1x0 + voutput_zero_point;
- int32_t vout1x1 = (int32_t) vrndacc1x1 + voutput_zero_point;
- int32_t vout1x2 = (int32_t) vrndacc1x2 + voutput_zero_point;
- int32_t vout1x3 = (int32_t) vrndacc1x3 + voutput_zero_point;
- int32_t vout2x0 = (int32_t) vrndacc2x0 + voutput_zero_point;
- int32_t vout2x1 = (int32_t) vrndacc2x1 + voutput_zero_point;
- int32_t vout2x2 = (int32_t) vrndacc2x2 + voutput_zero_point;
- int32_t vout2x3 = (int32_t) vrndacc2x3 + voutput_zero_point;
-
- if XNN_LIKELY(nc >= 4) {
- c2[0] = (int8_t) vout2x0;
- c2[1] = (int8_t) vout2x1;
- c2[2] = (int8_t) vout2x2;
- c2[3] = (int8_t) vout2x3;
- c1[0] = (int8_t) vout1x0;
- c1[1] = (int8_t) vout1x1;
- c1[2] = (int8_t) vout1x2;
- c1[3] = (int8_t) vout1x3;
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
- c0[2] = (int8_t) vout0x2;
- c0[3] = (int8_t) vout0x3;
-
- c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
- c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
- c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
-
- a = (const int8_t**restrict) ((uintptr_t) a - ks);
- nc -= 4;
- } else {
- if (nc & 2) {
- c2[0] = (int8_t) vout2x0;
- c2[1] = (int8_t) vout2x1;
- vout2x0 = vout2x2;
- c2 += 2;
- c1[0] = (int8_t) vout1x0;
- c1[1] = (int8_t) vout1x1;
- vout1x0 = vout1x2;
- c1 += 2;
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
- vout0x0 = vout0x2;
- c0 += 2;
- }
- if (nc & 1) {
- c2[0] = (int8_t) vout2x0;
- c1[0] = (int8_t) vout1x0;
- c0[0] = (int8_t) vout0x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qc8-igemm/gen/3x4-minmax-fp32-scalar-magic.c b/src/qc8-igemm/gen/3x4-minmax-fp32-scalar-lrintf.c
similarity index 78%
copy from src/qc8-igemm/gen/3x4-minmax-fp32-scalar-magic.c
copy to src/qc8-igemm/gen/3x4-minmax-fp32-scalar-lrintf.c
index 208d5d9..a5c20b6 100644
--- a/src/qc8-igemm/gen/3x4-minmax-fp32-scalar-magic.c
+++ b/src/qc8-igemm/gen/3x4-minmax-fp32-scalar-lrintf.c
@@ -8,14 +8,13 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-
-#include <fp16.h>
+#include <math.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
-void xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_magic(
+void xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
@@ -144,7 +143,7 @@
vfpacc2x3 *= vscale3;
w = (const void*) ((const float*) w + 4);
- const float voutput_min_less_zero_point = params->scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->scalar_lrintf.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
@@ -158,7 +157,7 @@
vfpacc2x2 = math_max_f32(vfpacc2x2, voutput_min_less_zero_point);
vfpacc2x3 = math_max_f32(vfpacc2x3, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->scalar_lrintf.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
@@ -172,33 +171,32 @@
vfpacc2x2 = math_min_f32(vfpacc2x2, voutput_max_less_zero_point);
vfpacc2x3 = math_min_f32(vfpacc2x3, voutput_max_less_zero_point);
- const float vmagic_bias = params->scalar_magic.magic_bias;
- vfpacc0x0 += vmagic_bias;
- vfpacc0x1 += vmagic_bias;
- vfpacc0x2 += vmagic_bias;
- vfpacc0x3 += vmagic_bias;
- vfpacc1x0 += vmagic_bias;
- vfpacc1x1 += vmagic_bias;
- vfpacc1x2 += vmagic_bias;
- vfpacc1x3 += vmagic_bias;
- vfpacc2x0 += vmagic_bias;
- vfpacc2x1 += vmagic_bias;
- vfpacc2x2 += vmagic_bias;
- vfpacc2x3 += vmagic_bias;
+ const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
+ const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
+ const int32_t vrndacc0x2 = (int32_t) lrintf(vfpacc0x2);
+ const int32_t vrndacc0x3 = (int32_t) lrintf(vfpacc0x3);
+ const int32_t vrndacc1x0 = (int32_t) lrintf(vfpacc1x0);
+ const int32_t vrndacc1x1 = (int32_t) lrintf(vfpacc1x1);
+ const int32_t vrndacc1x2 = (int32_t) lrintf(vfpacc1x2);
+ const int32_t vrndacc1x3 = (int32_t) lrintf(vfpacc1x3);
+ const int32_t vrndacc2x0 = (int32_t) lrintf(vfpacc2x0);
+ const int32_t vrndacc2x1 = (int32_t) lrintf(vfpacc2x1);
+ const int32_t vrndacc2x2 = (int32_t) lrintf(vfpacc2x2);
+ const int32_t vrndacc2x3 = (int32_t) lrintf(vfpacc2x3);
- const int32_t vmagic_bias_less_output_zero_point = params->scalar_magic.magic_bias_less_output_zero_point;
- int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
- int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
- int32_t vout0x2 = (int32_t) fp32_to_bits(vfpacc0x2) - vmagic_bias_less_output_zero_point;
- int32_t vout0x3 = (int32_t) fp32_to_bits(vfpacc0x3) - vmagic_bias_less_output_zero_point;
- int32_t vout1x0 = (int32_t) fp32_to_bits(vfpacc1x0) - vmagic_bias_less_output_zero_point;
- int32_t vout1x1 = (int32_t) fp32_to_bits(vfpacc1x1) - vmagic_bias_less_output_zero_point;
- int32_t vout1x2 = (int32_t) fp32_to_bits(vfpacc1x2) - vmagic_bias_less_output_zero_point;
- int32_t vout1x3 = (int32_t) fp32_to_bits(vfpacc1x3) - vmagic_bias_less_output_zero_point;
- int32_t vout2x0 = (int32_t) fp32_to_bits(vfpacc2x0) - vmagic_bias_less_output_zero_point;
- int32_t vout2x1 = (int32_t) fp32_to_bits(vfpacc2x1) - vmagic_bias_less_output_zero_point;
- int32_t vout2x2 = (int32_t) fp32_to_bits(vfpacc2x2) - vmagic_bias_less_output_zero_point;
- int32_t vout2x3 = (int32_t) fp32_to_bits(vfpacc2x3) - vmagic_bias_less_output_zero_point;
+ const int32_t voutput_zero_point = params->scalar_lrintf.output_zero_point;
+ int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
+ int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
+ int32_t vout0x2 = vrndacc0x2 + voutput_zero_point;
+ int32_t vout0x3 = vrndacc0x3 + voutput_zero_point;
+ int32_t vout1x0 = vrndacc1x0 + voutput_zero_point;
+ int32_t vout1x1 = vrndacc1x1 + voutput_zero_point;
+ int32_t vout1x2 = vrndacc1x2 + voutput_zero_point;
+ int32_t vout1x3 = vrndacc1x3 + voutput_zero_point;
+ int32_t vout2x0 = vrndacc2x0 + voutput_zero_point;
+ int32_t vout2x1 = vrndacc2x1 + voutput_zero_point;
+ int32_t vout2x2 = vrndacc2x2 + voutput_zero_point;
+ int32_t vout2x3 = vrndacc2x3 + voutput_zero_point;
if XNN_LIKELY(nc >= 4) {
c2[0] = (int8_t) vout2x0;
diff --git a/src/qc8-igemm/gen/4x2-minmax-fp32-scalar-magic.c b/src/qc8-igemm/gen/4x2-minmax-fp32-scalar-fmagic.c
similarity index 94%
rename from src/qc8-igemm/gen/4x2-minmax-fp32-scalar-magic.c
rename to src/qc8-igemm/gen/4x2-minmax-fp32-scalar-fmagic.c
index ec9f89e..4c97274 100644
--- a/src/qc8-igemm/gen/4x2-minmax-fp32-scalar-magic.c
+++ b/src/qc8-igemm/gen/4x2-minmax-fp32-scalar-fmagic.c
@@ -15,7 +15,7 @@
#include <xnnpack/gemm.h>
-void xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_magic(
+void xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic(
size_t mr,
size_t nc,
size_t kc,
@@ -135,7 +135,7 @@
vfpacc3x1 *= vscale1;
w = (const void*) ((const float*) w + 2);
- const float voutput_min_less_zero_point = params->scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point);
@@ -145,7 +145,7 @@
vfpacc3x0 = math_max_f32(vfpacc3x0, voutput_min_less_zero_point);
vfpacc3x1 = math_max_f32(vfpacc3x1, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point);
@@ -155,7 +155,7 @@
vfpacc3x0 = math_min_f32(vfpacc3x0, voutput_max_less_zero_point);
vfpacc3x1 = math_min_f32(vfpacc3x1, voutput_max_less_zero_point);
- const float vmagic_bias = params->scalar_magic.magic_bias;
+ const float vmagic_bias = params->scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc1x0 += vmagic_bias;
@@ -165,7 +165,7 @@
vfpacc3x0 += vmagic_bias;
vfpacc3x1 += vmagic_bias;
- const int32_t vmagic_bias_less_output_zero_point = params->scalar_magic.magic_bias_less_output_zero_point;
+ const int32_t vmagic_bias_less_output_zero_point = params->scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
int32_t vout1x0 = (int32_t) fp32_to_bits(vfpacc1x0) - vmagic_bias_less_output_zero_point;
diff --git a/src/qc8-igemm/gen/4x2-minmax-fp32-scalar-lrint.c b/src/qc8-igemm/gen/4x2-minmax-fp32-scalar-lrint.c
deleted file mode 100644
index 2aa0328..0000000
--- a/src/qc8-igemm/gen/4x2-minmax-fp32-scalar-lrint.c
+++ /dev/null
@@ -1,204 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-igemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-#include <math.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint(
- size_t mr,
- size_t nc,
- size_t kc,
- size_t ks,
- const int8_t**restrict a,
- const void*restrict w,
- int8_t*restrict c,
- size_t cm_stride,
- size_t cn_stride,
- size_t a_offset,
- const int8_t* zero,
- const union xnn_qs8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 4);
- assert(nc != 0);
- assert(kc != 0);
- assert(ks != 0);
- assert(ks % (4 * sizeof(void*)) == 0);
- assert(a != NULL);
- assert(w != NULL);
- assert(c != NULL);
-
- int8_t* c0 = c;
- int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
- if XNN_UNPREDICTABLE(mr < 2) {
- c1 = c0;
- }
- int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
- if XNN_UNPREDICTABLE(mr <= 2) {
- c2 = c1;
- }
- int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
- if XNN_UNPREDICTABLE(mr != 4) {
- c3 = c2;
- }
-
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- int32_t vacc1x0 = vacc0x0;
- int32_t vacc1x1 = vacc0x1;
- int32_t vacc2x0 = vacc0x0;
- int32_t vacc2x1 = vacc0x1;
- int32_t vacc3x0 = vacc0x0;
- int32_t vacc3x1 = vacc0x1;
- w = (const void*) ((const int32_t*) w + 2);
-
- size_t p = ks;
- do {
- const int8_t* restrict a0 = a[0];
- assert(a0 != NULL);
- if XNN_UNPREDICTABLE(a0 != zero) {
- a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
- }
- const int8_t* restrict a1 = a[1];
- assert(a1 != NULL);
- if XNN_UNPREDICTABLE(a1 != zero) {
- a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
- }
- const int8_t* restrict a2 = a[2];
- assert(a2 != NULL);
- if XNN_UNPREDICTABLE(a2 != zero) {
- a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
- }
- const int8_t* restrict a3 = a[3];
- assert(a3 != NULL);
- if XNN_UNPREDICTABLE(a3 != zero) {
- a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
- }
- a += 4;
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) *a0++;
- const int32_t va1 = (int32_t) *a1++;
- const int32_t va2 = (int32_t) *a2++;
- const int32_t va3 = (int32_t) *a3++;
-
- const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
- const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
- w = (const void*) ((const int8_t*) w + 2);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
- vacc1x0 += va1 * vb0;
- vacc1x1 += va1 * vb1;
- vacc2x0 += va2 * vb0;
- vacc2x1 += va2 * vb1;
- vacc3x0 += va3 * vb0;
- vacc3x1 += va3 * vb1;
-
- k -= sizeof(int8_t);
- } while (k != 0);
- p -= 4 * sizeof(void*);
- } while (p != 0);
-
- float vfpacc0x0 = (float) vacc0x0;
- float vfpacc0x1 = (float) vacc0x1;
- float vfpacc1x0 = (float) vacc1x0;
- float vfpacc1x1 = (float) vacc1x1;
- float vfpacc2x0 = (float) vacc2x0;
- float vfpacc2x1 = (float) vacc2x1;
- float vfpacc3x0 = (float) vacc3x0;
- float vfpacc3x1 = (float) vacc3x1;
-
- typedef XNN_UNALIGNED float unaligned_float;
- const float vscale0 = ((const unaligned_float*) w)[0];
- vfpacc0x0 *= vscale0;
- vfpacc1x0 *= vscale0;
- vfpacc2x0 *= vscale0;
- vfpacc3x0 *= vscale0;
- const float vscale1 = ((const unaligned_float*) w)[1];
- vfpacc0x1 *= vscale1;
- vfpacc1x1 *= vscale1;
- vfpacc2x1 *= vscale1;
- vfpacc3x1 *= vscale1;
- w = (const void*) ((const float*) w + 2);
-
- long vrndacc0x0 = lrintf(vfpacc0x0);
- long vrndacc0x1 = lrintf(vfpacc0x1);
- long vrndacc1x0 = lrintf(vfpacc1x0);
- long vrndacc1x1 = lrintf(vfpacc1x1);
- long vrndacc2x0 = lrintf(vfpacc2x0);
- long vrndacc2x1 = lrintf(vfpacc2x1);
- long vrndacc3x0 = lrintf(vfpacc3x0);
- long vrndacc3x1 = lrintf(vfpacc3x1);
-
- const long voutput_min_less_zero_point = params->scalar_lrint.output_min_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x1;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x1;
- vrndacc2x0 = XNN_UNPREDICTABLE(vrndacc2x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x0;
- vrndacc2x1 = XNN_UNPREDICTABLE(vrndacc2x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x1;
- vrndacc3x0 = XNN_UNPREDICTABLE(vrndacc3x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc3x0;
- vrndacc3x1 = XNN_UNPREDICTABLE(vrndacc3x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc3x1;
-
- const long voutput_max_less_zero_point = params->scalar_lrint.output_max_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x1;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x1;
- vrndacc2x0 = XNN_UNPREDICTABLE(vrndacc2x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x0;
- vrndacc2x1 = XNN_UNPREDICTABLE(vrndacc2x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x1;
- vrndacc3x0 = XNN_UNPREDICTABLE(vrndacc3x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc3x0;
- vrndacc3x1 = XNN_UNPREDICTABLE(vrndacc3x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc3x1;
-
- const int32_t voutput_zero_point = params->scalar_lrint.output_zero_point;
- int32_t vout0x0 = (int32_t) vrndacc0x0 + voutput_zero_point;
- int32_t vout0x1 = (int32_t) vrndacc0x1 + voutput_zero_point;
- int32_t vout1x0 = (int32_t) vrndacc1x0 + voutput_zero_point;
- int32_t vout1x1 = (int32_t) vrndacc1x1 + voutput_zero_point;
- int32_t vout2x0 = (int32_t) vrndacc2x0 + voutput_zero_point;
- int32_t vout2x1 = (int32_t) vrndacc2x1 + voutput_zero_point;
- int32_t vout3x0 = (int32_t) vrndacc3x0 + voutput_zero_point;
- int32_t vout3x1 = (int32_t) vrndacc3x1 + voutput_zero_point;
-
- if XNN_LIKELY(nc >= 2) {
- c3[0] = (int8_t) vout3x0;
- c3[1] = (int8_t) vout3x1;
- c2[0] = (int8_t) vout2x0;
- c2[1] = (int8_t) vout2x1;
- c1[0] = (int8_t) vout1x0;
- c1[1] = (int8_t) vout1x1;
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
-
- c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
- c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
- c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
- c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
-
- a = (const int8_t**restrict) ((uintptr_t) a - ks);
- nc -= 2;
- } else {
- if (nc & 1) {
- c3[0] = (int8_t) vout3x0;
- c2[0] = (int8_t) vout2x0;
- c1[0] = (int8_t) vout1x0;
- c0[0] = (int8_t) vout0x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qc8-igemm/gen/4x2-minmax-fp32-scalar-magic.c b/src/qc8-igemm/gen/4x2-minmax-fp32-scalar-lrintf.c
similarity index 79%
copy from src/qc8-igemm/gen/4x2-minmax-fp32-scalar-magic.c
copy to src/qc8-igemm/gen/4x2-minmax-fp32-scalar-lrintf.c
index ec9f89e..2edf8f7 100644
--- a/src/qc8-igemm/gen/4x2-minmax-fp32-scalar-magic.c
+++ b/src/qc8-igemm/gen/4x2-minmax-fp32-scalar-lrintf.c
@@ -8,14 +8,13 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-
-#include <fp16.h>
+#include <math.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
-void xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_magic(
+void xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
@@ -135,7 +134,7 @@
vfpacc3x1 *= vscale1;
w = (const void*) ((const float*) w + 2);
- const float voutput_min_less_zero_point = params->scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->scalar_lrintf.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point);
@@ -145,7 +144,7 @@
vfpacc3x0 = math_max_f32(vfpacc3x0, voutput_min_less_zero_point);
vfpacc3x1 = math_max_f32(vfpacc3x1, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->scalar_lrintf.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point);
@@ -155,25 +154,24 @@
vfpacc3x0 = math_min_f32(vfpacc3x0, voutput_max_less_zero_point);
vfpacc3x1 = math_min_f32(vfpacc3x1, voutput_max_less_zero_point);
- const float vmagic_bias = params->scalar_magic.magic_bias;
- vfpacc0x0 += vmagic_bias;
- vfpacc0x1 += vmagic_bias;
- vfpacc1x0 += vmagic_bias;
- vfpacc1x1 += vmagic_bias;
- vfpacc2x0 += vmagic_bias;
- vfpacc2x1 += vmagic_bias;
- vfpacc3x0 += vmagic_bias;
- vfpacc3x1 += vmagic_bias;
+ const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
+ const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
+ const int32_t vrndacc1x0 = (int32_t) lrintf(vfpacc1x0);
+ const int32_t vrndacc1x1 = (int32_t) lrintf(vfpacc1x1);
+ const int32_t vrndacc2x0 = (int32_t) lrintf(vfpacc2x0);
+ const int32_t vrndacc2x1 = (int32_t) lrintf(vfpacc2x1);
+ const int32_t vrndacc3x0 = (int32_t) lrintf(vfpacc3x0);
+ const int32_t vrndacc3x1 = (int32_t) lrintf(vfpacc3x1);
- const int32_t vmagic_bias_less_output_zero_point = params->scalar_magic.magic_bias_less_output_zero_point;
- int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
- int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
- int32_t vout1x0 = (int32_t) fp32_to_bits(vfpacc1x0) - vmagic_bias_less_output_zero_point;
- int32_t vout1x1 = (int32_t) fp32_to_bits(vfpacc1x1) - vmagic_bias_less_output_zero_point;
- int32_t vout2x0 = (int32_t) fp32_to_bits(vfpacc2x0) - vmagic_bias_less_output_zero_point;
- int32_t vout2x1 = (int32_t) fp32_to_bits(vfpacc2x1) - vmagic_bias_less_output_zero_point;
- int32_t vout3x0 = (int32_t) fp32_to_bits(vfpacc3x0) - vmagic_bias_less_output_zero_point;
- int32_t vout3x1 = (int32_t) fp32_to_bits(vfpacc3x1) - vmagic_bias_less_output_zero_point;
+ const int32_t voutput_zero_point = params->scalar_lrintf.output_zero_point;
+ int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
+ int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
+ int32_t vout1x0 = vrndacc1x0 + voutput_zero_point;
+ int32_t vout1x1 = vrndacc1x1 + voutput_zero_point;
+ int32_t vout2x0 = vrndacc2x0 + voutput_zero_point;
+ int32_t vout2x1 = vrndacc2x1 + voutput_zero_point;
+ int32_t vout3x0 = vrndacc3x0 + voutput_zero_point;
+ int32_t vout3x1 = vrndacc3x1 + voutput_zero_point;
if XNN_LIKELY(nc >= 2) {
c3[0] = (int8_t) vout3x0;
diff --git a/src/qc8-igemm/gen/4x4-minmax-fp32-scalar-magic.c b/src/qc8-igemm/gen/4x4-minmax-fp32-scalar-fmagic.c
similarity index 96%
rename from src/qc8-igemm/gen/4x4-minmax-fp32-scalar-magic.c
rename to src/qc8-igemm/gen/4x4-minmax-fp32-scalar-fmagic.c
index 729217b..7096f00 100644
--- a/src/qc8-igemm/gen/4x4-minmax-fp32-scalar-magic.c
+++ b/src/qc8-igemm/gen/4x4-minmax-fp32-scalar-fmagic.c
@@ -15,7 +15,7 @@
#include <xnnpack/gemm.h>
-void xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_magic(
+void xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic(
size_t mr,
size_t nc,
size_t kc,
@@ -170,7 +170,7 @@
vfpacc3x3 *= vscale3;
w = (const void*) ((const float*) w + 4);
- const float voutput_min_less_zero_point = params->scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
@@ -188,7 +188,7 @@
vfpacc3x2 = math_max_f32(vfpacc3x2, voutput_min_less_zero_point);
vfpacc3x3 = math_max_f32(vfpacc3x3, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
@@ -206,7 +206,7 @@
vfpacc3x2 = math_min_f32(vfpacc3x2, voutput_max_less_zero_point);
vfpacc3x3 = math_min_f32(vfpacc3x3, voutput_max_less_zero_point);
- const float vmagic_bias = params->scalar_magic.magic_bias;
+ const float vmagic_bias = params->scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc0x2 += vmagic_bias;
@@ -224,7 +224,7 @@
vfpacc3x2 += vmagic_bias;
vfpacc3x3 += vmagic_bias;
- const int32_t vmagic_bias_less_output_zero_point = params->scalar_magic.magic_bias_less_output_zero_point;
+ const int32_t vmagic_bias_less_output_zero_point = params->scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
int32_t vout0x2 = (int32_t) fp32_to_bits(vfpacc0x2) - vmagic_bias_less_output_zero_point;
diff --git a/src/qc8-igemm/gen/4x4-minmax-fp32-scalar-lrint.c b/src/qc8-igemm/gen/4x4-minmax-fp32-scalar-lrint.c
deleted file mode 100644
index df19cd1..0000000
--- a/src/qc8-igemm/gen/4x4-minmax-fp32-scalar-lrint.c
+++ /dev/null
@@ -1,297 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-igemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-#include <math.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint(
- size_t mr,
- size_t nc,
- size_t kc,
- size_t ks,
- const int8_t**restrict a,
- const void*restrict w,
- int8_t*restrict c,
- size_t cm_stride,
- size_t cn_stride,
- size_t a_offset,
- const int8_t* zero,
- const union xnn_qs8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 4);
- assert(nc != 0);
- assert(kc != 0);
- assert(ks != 0);
- assert(ks % (4 * sizeof(void*)) == 0);
- assert(a != NULL);
- assert(w != NULL);
- assert(c != NULL);
-
- int8_t* c0 = c;
- int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
- if XNN_UNPREDICTABLE(mr < 2) {
- c1 = c0;
- }
- int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
- if XNN_UNPREDICTABLE(mr <= 2) {
- c2 = c1;
- }
- int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
- if XNN_UNPREDICTABLE(mr != 4) {
- c3 = c2;
- }
-
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- int32_t vacc0x2 = ((const int32_t*) w)[2];
- int32_t vacc0x3 = ((const int32_t*) w)[3];
- int32_t vacc1x0 = vacc0x0;
- int32_t vacc1x1 = vacc0x1;
- int32_t vacc1x2 = vacc0x2;
- int32_t vacc1x3 = vacc0x3;
- int32_t vacc2x0 = vacc0x0;
- int32_t vacc2x1 = vacc0x1;
- int32_t vacc2x2 = vacc0x2;
- int32_t vacc2x3 = vacc0x3;
- int32_t vacc3x0 = vacc0x0;
- int32_t vacc3x1 = vacc0x1;
- int32_t vacc3x2 = vacc0x2;
- int32_t vacc3x3 = vacc0x3;
- w = (const void*) ((const int32_t*) w + 4);
-
- size_t p = ks;
- do {
- const int8_t* restrict a0 = a[0];
- assert(a0 != NULL);
- if XNN_UNPREDICTABLE(a0 != zero) {
- a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
- }
- const int8_t* restrict a1 = a[1];
- assert(a1 != NULL);
- if XNN_UNPREDICTABLE(a1 != zero) {
- a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
- }
- const int8_t* restrict a2 = a[2];
- assert(a2 != NULL);
- if XNN_UNPREDICTABLE(a2 != zero) {
- a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
- }
- const int8_t* restrict a3 = a[3];
- assert(a3 != NULL);
- if XNN_UNPREDICTABLE(a3 != zero) {
- a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
- }
- a += 4;
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) *a0++;
- const int32_t va1 = (int32_t) *a1++;
- const int32_t va2 = (int32_t) *a2++;
- const int32_t va3 = (int32_t) *a3++;
-
- const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
- const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
- const int32_t vb2 = (int32_t) ((const int8_t*) w)[2];
- const int32_t vb3 = (int32_t) ((const int8_t*) w)[3];
- w = (const void*) ((const int8_t*) w + 4);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
- vacc0x2 += va0 * vb2;
- vacc0x3 += va0 * vb3;
- vacc1x0 += va1 * vb0;
- vacc1x1 += va1 * vb1;
- vacc1x2 += va1 * vb2;
- vacc1x3 += va1 * vb3;
- vacc2x0 += va2 * vb0;
- vacc2x1 += va2 * vb1;
- vacc2x2 += va2 * vb2;
- vacc2x3 += va2 * vb3;
- vacc3x0 += va3 * vb0;
- vacc3x1 += va3 * vb1;
- vacc3x2 += va3 * vb2;
- vacc3x3 += va3 * vb3;
-
- k -= sizeof(int8_t);
- } while (k != 0);
- p -= 4 * sizeof(void*);
- } while (p != 0);
-
- float vfpacc0x0 = (float) vacc0x0;
- float vfpacc0x1 = (float) vacc0x1;
- float vfpacc0x2 = (float) vacc0x2;
- float vfpacc0x3 = (float) vacc0x3;
- float vfpacc1x0 = (float) vacc1x0;
- float vfpacc1x1 = (float) vacc1x1;
- float vfpacc1x2 = (float) vacc1x2;
- float vfpacc1x3 = (float) vacc1x3;
- float vfpacc2x0 = (float) vacc2x0;
- float vfpacc2x1 = (float) vacc2x1;
- float vfpacc2x2 = (float) vacc2x2;
- float vfpacc2x3 = (float) vacc2x3;
- float vfpacc3x0 = (float) vacc3x0;
- float vfpacc3x1 = (float) vacc3x1;
- float vfpacc3x2 = (float) vacc3x2;
- float vfpacc3x3 = (float) vacc3x3;
-
- const float vscale0 = ((const float*) w)[0];
- vfpacc0x0 *= vscale0;
- vfpacc1x0 *= vscale0;
- vfpacc2x0 *= vscale0;
- vfpacc3x0 *= vscale0;
- const float vscale1 = ((const float*) w)[1];
- vfpacc0x1 *= vscale1;
- vfpacc1x1 *= vscale1;
- vfpacc2x1 *= vscale1;
- vfpacc3x1 *= vscale1;
- const float vscale2 = ((const float*) w)[2];
- vfpacc0x2 *= vscale2;
- vfpacc1x2 *= vscale2;
- vfpacc2x2 *= vscale2;
- vfpacc3x2 *= vscale2;
- const float vscale3 = ((const float*) w)[3];
- vfpacc0x3 *= vscale3;
- vfpacc1x3 *= vscale3;
- vfpacc2x3 *= vscale3;
- vfpacc3x3 *= vscale3;
- w = (const void*) ((const float*) w + 4);
-
- long vrndacc0x0 = lrintf(vfpacc0x0);
- long vrndacc0x1 = lrintf(vfpacc0x1);
- long vrndacc0x2 = lrintf(vfpacc0x2);
- long vrndacc0x3 = lrintf(vfpacc0x3);
- long vrndacc1x0 = lrintf(vfpacc1x0);
- long vrndacc1x1 = lrintf(vfpacc1x1);
- long vrndacc1x2 = lrintf(vfpacc1x2);
- long vrndacc1x3 = lrintf(vfpacc1x3);
- long vrndacc2x0 = lrintf(vfpacc2x0);
- long vrndacc2x1 = lrintf(vfpacc2x1);
- long vrndacc2x2 = lrintf(vfpacc2x2);
- long vrndacc2x3 = lrintf(vfpacc2x3);
- long vrndacc3x0 = lrintf(vfpacc3x0);
- long vrndacc3x1 = lrintf(vfpacc3x1);
- long vrndacc3x2 = lrintf(vfpacc3x2);
- long vrndacc3x3 = lrintf(vfpacc3x3);
-
- const long voutput_min_less_zero_point = params->scalar_lrint.output_min_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x1;
- vrndacc0x2 = XNN_UNPREDICTABLE(vrndacc0x2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x2;
- vrndacc0x3 = XNN_UNPREDICTABLE(vrndacc0x3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x3;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x1;
- vrndacc1x2 = XNN_UNPREDICTABLE(vrndacc1x2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x2;
- vrndacc1x3 = XNN_UNPREDICTABLE(vrndacc1x3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x3;
- vrndacc2x0 = XNN_UNPREDICTABLE(vrndacc2x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x0;
- vrndacc2x1 = XNN_UNPREDICTABLE(vrndacc2x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x1;
- vrndacc2x2 = XNN_UNPREDICTABLE(vrndacc2x2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x2;
- vrndacc2x3 = XNN_UNPREDICTABLE(vrndacc2x3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x3;
- vrndacc3x0 = XNN_UNPREDICTABLE(vrndacc3x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc3x0;
- vrndacc3x1 = XNN_UNPREDICTABLE(vrndacc3x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc3x1;
- vrndacc3x2 = XNN_UNPREDICTABLE(vrndacc3x2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc3x2;
- vrndacc3x3 = XNN_UNPREDICTABLE(vrndacc3x3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc3x3;
-
- const long voutput_max_less_zero_point = params->scalar_lrint.output_max_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x1;
- vrndacc0x2 = XNN_UNPREDICTABLE(vrndacc0x2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x2;
- vrndacc0x3 = XNN_UNPREDICTABLE(vrndacc0x3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x3;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x1;
- vrndacc1x2 = XNN_UNPREDICTABLE(vrndacc1x2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x2;
- vrndacc1x3 = XNN_UNPREDICTABLE(vrndacc1x3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x3;
- vrndacc2x0 = XNN_UNPREDICTABLE(vrndacc2x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x0;
- vrndacc2x1 = XNN_UNPREDICTABLE(vrndacc2x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x1;
- vrndacc2x2 = XNN_UNPREDICTABLE(vrndacc2x2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x2;
- vrndacc2x3 = XNN_UNPREDICTABLE(vrndacc2x3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x3;
- vrndacc3x0 = XNN_UNPREDICTABLE(vrndacc3x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc3x0;
- vrndacc3x1 = XNN_UNPREDICTABLE(vrndacc3x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc3x1;
- vrndacc3x2 = XNN_UNPREDICTABLE(vrndacc3x2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc3x2;
- vrndacc3x3 = XNN_UNPREDICTABLE(vrndacc3x3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc3x3;
-
- const int32_t voutput_zero_point = params->scalar_lrint.output_zero_point;
- int32_t vout0x0 = (int32_t) vrndacc0x0 + voutput_zero_point;
- int32_t vout0x1 = (int32_t) vrndacc0x1 + voutput_zero_point;
- int32_t vout0x2 = (int32_t) vrndacc0x2 + voutput_zero_point;
- int32_t vout0x3 = (int32_t) vrndacc0x3 + voutput_zero_point;
- int32_t vout1x0 = (int32_t) vrndacc1x0 + voutput_zero_point;
- int32_t vout1x1 = (int32_t) vrndacc1x1 + voutput_zero_point;
- int32_t vout1x2 = (int32_t) vrndacc1x2 + voutput_zero_point;
- int32_t vout1x3 = (int32_t) vrndacc1x3 + voutput_zero_point;
- int32_t vout2x0 = (int32_t) vrndacc2x0 + voutput_zero_point;
- int32_t vout2x1 = (int32_t) vrndacc2x1 + voutput_zero_point;
- int32_t vout2x2 = (int32_t) vrndacc2x2 + voutput_zero_point;
- int32_t vout2x3 = (int32_t) vrndacc2x3 + voutput_zero_point;
- int32_t vout3x0 = (int32_t) vrndacc3x0 + voutput_zero_point;
- int32_t vout3x1 = (int32_t) vrndacc3x1 + voutput_zero_point;
- int32_t vout3x2 = (int32_t) vrndacc3x2 + voutput_zero_point;
- int32_t vout3x3 = (int32_t) vrndacc3x3 + voutput_zero_point;
-
- if XNN_LIKELY(nc >= 4) {
- c3[0] = (int8_t) vout3x0;
- c3[1] = (int8_t) vout3x1;
- c3[2] = (int8_t) vout3x2;
- c3[3] = (int8_t) vout3x3;
- c2[0] = (int8_t) vout2x0;
- c2[1] = (int8_t) vout2x1;
- c2[2] = (int8_t) vout2x2;
- c2[3] = (int8_t) vout2x3;
- c1[0] = (int8_t) vout1x0;
- c1[1] = (int8_t) vout1x1;
- c1[2] = (int8_t) vout1x2;
- c1[3] = (int8_t) vout1x3;
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
- c0[2] = (int8_t) vout0x2;
- c0[3] = (int8_t) vout0x3;
-
- c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
- c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
- c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
- c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
-
- a = (const int8_t**restrict) ((uintptr_t) a - ks);
- nc -= 4;
- } else {
- if (nc & 2) {
- c3[0] = (int8_t) vout3x0;
- c3[1] = (int8_t) vout3x1;
- vout3x0 = vout3x2;
- c3 += 2;
- c2[0] = (int8_t) vout2x0;
- c2[1] = (int8_t) vout2x1;
- vout2x0 = vout2x2;
- c2 += 2;
- c1[0] = (int8_t) vout1x0;
- c1[1] = (int8_t) vout1x1;
- vout1x0 = vout1x2;
- c1 += 2;
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
- vout0x0 = vout0x2;
- c0 += 2;
- }
- if (nc & 1) {
- c3[0] = (int8_t) vout3x0;
- c2[0] = (int8_t) vout2x0;
- c1[0] = (int8_t) vout1x0;
- c0[0] = (int8_t) vout0x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qc8-igemm/gen/4x4-minmax-fp32-scalar-magic.c b/src/qc8-igemm/gen/4x4-minmax-fp32-scalar-lrintf.c
similarity index 77%
copy from src/qc8-igemm/gen/4x4-minmax-fp32-scalar-magic.c
copy to src/qc8-igemm/gen/4x4-minmax-fp32-scalar-lrintf.c
index 729217b..1665c81 100644
--- a/src/qc8-igemm/gen/4x4-minmax-fp32-scalar-magic.c
+++ b/src/qc8-igemm/gen/4x4-minmax-fp32-scalar-lrintf.c
@@ -8,14 +8,13 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-
-#include <fp16.h>
+#include <math.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
-void xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_magic(
+void xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
@@ -170,7 +169,7 @@
vfpacc3x3 *= vscale3;
w = (const void*) ((const float*) w + 4);
- const float voutput_min_less_zero_point = params->scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->scalar_lrintf.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
@@ -188,7 +187,7 @@
vfpacc3x2 = math_max_f32(vfpacc3x2, voutput_min_less_zero_point);
vfpacc3x3 = math_max_f32(vfpacc3x3, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->scalar_lrintf.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
@@ -206,41 +205,40 @@
vfpacc3x2 = math_min_f32(vfpacc3x2, voutput_max_less_zero_point);
vfpacc3x3 = math_min_f32(vfpacc3x3, voutput_max_less_zero_point);
- const float vmagic_bias = params->scalar_magic.magic_bias;
- vfpacc0x0 += vmagic_bias;
- vfpacc0x1 += vmagic_bias;
- vfpacc0x2 += vmagic_bias;
- vfpacc0x3 += vmagic_bias;
- vfpacc1x0 += vmagic_bias;
- vfpacc1x1 += vmagic_bias;
- vfpacc1x2 += vmagic_bias;
- vfpacc1x3 += vmagic_bias;
- vfpacc2x0 += vmagic_bias;
- vfpacc2x1 += vmagic_bias;
- vfpacc2x2 += vmagic_bias;
- vfpacc2x3 += vmagic_bias;
- vfpacc3x0 += vmagic_bias;
- vfpacc3x1 += vmagic_bias;
- vfpacc3x2 += vmagic_bias;
- vfpacc3x3 += vmagic_bias;
+ const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
+ const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
+ const int32_t vrndacc0x2 = (int32_t) lrintf(vfpacc0x2);
+ const int32_t vrndacc0x3 = (int32_t) lrintf(vfpacc0x3);
+ const int32_t vrndacc1x0 = (int32_t) lrintf(vfpacc1x0);
+ const int32_t vrndacc1x1 = (int32_t) lrintf(vfpacc1x1);
+ const int32_t vrndacc1x2 = (int32_t) lrintf(vfpacc1x2);
+ const int32_t vrndacc1x3 = (int32_t) lrintf(vfpacc1x3);
+ const int32_t vrndacc2x0 = (int32_t) lrintf(vfpacc2x0);
+ const int32_t vrndacc2x1 = (int32_t) lrintf(vfpacc2x1);
+ const int32_t vrndacc2x2 = (int32_t) lrintf(vfpacc2x2);
+ const int32_t vrndacc2x3 = (int32_t) lrintf(vfpacc2x3);
+ const int32_t vrndacc3x0 = (int32_t) lrintf(vfpacc3x0);
+ const int32_t vrndacc3x1 = (int32_t) lrintf(vfpacc3x1);
+ const int32_t vrndacc3x2 = (int32_t) lrintf(vfpacc3x2);
+ const int32_t vrndacc3x3 = (int32_t) lrintf(vfpacc3x3);
- const int32_t vmagic_bias_less_output_zero_point = params->scalar_magic.magic_bias_less_output_zero_point;
- int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
- int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
- int32_t vout0x2 = (int32_t) fp32_to_bits(vfpacc0x2) - vmagic_bias_less_output_zero_point;
- int32_t vout0x3 = (int32_t) fp32_to_bits(vfpacc0x3) - vmagic_bias_less_output_zero_point;
- int32_t vout1x0 = (int32_t) fp32_to_bits(vfpacc1x0) - vmagic_bias_less_output_zero_point;
- int32_t vout1x1 = (int32_t) fp32_to_bits(vfpacc1x1) - vmagic_bias_less_output_zero_point;
- int32_t vout1x2 = (int32_t) fp32_to_bits(vfpacc1x2) - vmagic_bias_less_output_zero_point;
- int32_t vout1x3 = (int32_t) fp32_to_bits(vfpacc1x3) - vmagic_bias_less_output_zero_point;
- int32_t vout2x0 = (int32_t) fp32_to_bits(vfpacc2x0) - vmagic_bias_less_output_zero_point;
- int32_t vout2x1 = (int32_t) fp32_to_bits(vfpacc2x1) - vmagic_bias_less_output_zero_point;
- int32_t vout2x2 = (int32_t) fp32_to_bits(vfpacc2x2) - vmagic_bias_less_output_zero_point;
- int32_t vout2x3 = (int32_t) fp32_to_bits(vfpacc2x3) - vmagic_bias_less_output_zero_point;
- int32_t vout3x0 = (int32_t) fp32_to_bits(vfpacc3x0) - vmagic_bias_less_output_zero_point;
- int32_t vout3x1 = (int32_t) fp32_to_bits(vfpacc3x1) - vmagic_bias_less_output_zero_point;
- int32_t vout3x2 = (int32_t) fp32_to_bits(vfpacc3x2) - vmagic_bias_less_output_zero_point;
- int32_t vout3x3 = (int32_t) fp32_to_bits(vfpacc3x3) - vmagic_bias_less_output_zero_point;
+ const int32_t voutput_zero_point = params->scalar_lrintf.output_zero_point;
+ int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
+ int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
+ int32_t vout0x2 = vrndacc0x2 + voutput_zero_point;
+ int32_t vout0x3 = vrndacc0x3 + voutput_zero_point;
+ int32_t vout1x0 = vrndacc1x0 + voutput_zero_point;
+ int32_t vout1x1 = vrndacc1x1 + voutput_zero_point;
+ int32_t vout1x2 = vrndacc1x2 + voutput_zero_point;
+ int32_t vout1x3 = vrndacc1x3 + voutput_zero_point;
+ int32_t vout2x0 = vrndacc2x0 + voutput_zero_point;
+ int32_t vout2x1 = vrndacc2x1 + voutput_zero_point;
+ int32_t vout2x2 = vrndacc2x2 + voutput_zero_point;
+ int32_t vout2x3 = vrndacc2x3 + voutput_zero_point;
+ int32_t vout3x0 = vrndacc3x0 + voutput_zero_point;
+ int32_t vout3x1 = vrndacc3x1 + voutput_zero_point;
+ int32_t vout3x2 = vrndacc3x2 + voutput_zero_point;
+ int32_t vout3x3 = vrndacc3x3 + voutput_zero_point;
if XNN_LIKELY(nc >= 4) {
c3[0] = (int8_t) vout3x0;
diff --git a/src/qs8-dwconv/gen/up1x25-minmax-fp32-scalar-lrint.c b/src/qs8-dwconv/gen/up1x25-minmax-fp32-scalar-fmagic.c
similarity index 91%
copy from src/qs8-dwconv/gen/up1x25-minmax-fp32-scalar-lrint.c
copy to src/qs8-dwconv/gen/up1x25-minmax-fp32-scalar-fmagic.c
index e65fea0..581adbf 100644
--- a/src/qs8-dwconv/gen/up1x25-minmax-fp32-scalar-lrint.c
+++ b/src/qs8-dwconv/gen/up1x25-minmax-fp32-scalar-fmagic.c
@@ -8,13 +8,14 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-#include <math.h>
+
+#include <fp16.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
-void xnn_qs8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrint(
+void xnn_qs8_dwconv_minmax_fp32_ukernel_up1x25__scalar_fmagic(
size_t channels,
size_t output_width,
const int8_t** input,
@@ -29,10 +30,11 @@
assert(channels != 0);
assert(output_width != 0);
- const float vscale = params->fp32_scalar_lrint.scale;
- const long voutput_min_less_zero_point = params->fp32_scalar_lrint.output_min_less_zero_point;
- const long voutput_max_less_zero_point = params->fp32_scalar_lrint.output_max_less_zero_point;
- const int32_t voutput_zero_point = params->fp32_scalar_lrint.output_zero_point;
+ const float vscale = params->fp32_scalar_fmagic.scale;
+ const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
+ const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
+ const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
do {
const int8_t* i0 = input[0];
assert(i0 != NULL);
@@ -244,11 +246,11 @@
w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 25 * sizeof(int8_t));
- const float vfpacc = (float) vacc * vscale;
- long vrndacc = lrintf(vfpacc);
- vrndacc = XNN_UNPREDICTABLE(vrndacc < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc;
- vrndacc = XNN_UNPREDICTABLE(vrndacc > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc;
- int32_t vout = (int32_t) vrndacc + voutput_zero_point;
+ float vfpacc = (float) vacc * vscale;
+ vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
+ vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
+ vfpacc += vmagic_bias;
+ int32_t vout = (int32_t) fp32_to_bits(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (int8_t) vout;
} while (--c != 0);
diff --git a/src/qs8-dwconv/gen/up1x25-minmax-fp32-scalar-lrint.c b/src/qs8-dwconv/gen/up1x25-minmax-fp32-scalar-lrintf.c
similarity index 95%
rename from src/qs8-dwconv/gen/up1x25-minmax-fp32-scalar-lrint.c
rename to src/qs8-dwconv/gen/up1x25-minmax-fp32-scalar-lrintf.c
index e65fea0..3320555 100644
--- a/src/qs8-dwconv/gen/up1x25-minmax-fp32-scalar-lrint.c
+++ b/src/qs8-dwconv/gen/up1x25-minmax-fp32-scalar-lrintf.c
@@ -14,7 +14,7 @@
#include <xnnpack/math.h>
-void xnn_qs8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrint(
+void xnn_qs8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrintf(
size_t channels,
size_t output_width,
const int8_t** input,
@@ -29,10 +29,10 @@
assert(channels != 0);
assert(output_width != 0);
- const float vscale = params->fp32_scalar_lrint.scale;
- const long voutput_min_less_zero_point = params->fp32_scalar_lrint.output_min_less_zero_point;
- const long voutput_max_less_zero_point = params->fp32_scalar_lrint.output_max_less_zero_point;
- const int32_t voutput_zero_point = params->fp32_scalar_lrint.output_zero_point;
+ const float vscale = params->fp32_scalar_lrintf.scale;
+ const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
+ const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
do {
const int8_t* i0 = input[0];
assert(i0 != NULL);
diff --git a/src/qs8-dwconv/gen/up1x9-minmax-fp32-scalar-lrint.c b/src/qs8-dwconv/gen/up1x9-minmax-fp32-scalar-fmagic.c
similarity index 82%
copy from src/qs8-dwconv/gen/up1x9-minmax-fp32-scalar-lrint.c
copy to src/qs8-dwconv/gen/up1x9-minmax-fp32-scalar-fmagic.c
index 49d1ec1..d3ddfbc 100644
--- a/src/qs8-dwconv/gen/up1x9-minmax-fp32-scalar-lrint.c
+++ b/src/qs8-dwconv/gen/up1x9-minmax-fp32-scalar-fmagic.c
@@ -8,13 +8,14 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-#include <math.h>
+
+#include <fp16.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
-void xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrint(
+void xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_fmagic(
size_t channels,
size_t output_width,
const int8_t** input,
@@ -29,10 +30,11 @@
assert(channels != 0);
assert(output_width != 0);
- const float vscale = params->fp32_scalar_lrint.scale;
- const long voutput_min_less_zero_point = params->fp32_scalar_lrint.output_min_less_zero_point;
- const long voutput_max_less_zero_point = params->fp32_scalar_lrint.output_max_less_zero_point;
- const int32_t voutput_zero_point = params->fp32_scalar_lrint.output_zero_point;
+ const float vscale = params->fp32_scalar_fmagic.scale;
+ const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
+ const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
+ const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
do {
const int8_t* i0 = input[0];
assert(i0 != NULL);
@@ -116,11 +118,11 @@
w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 9 * sizeof(int8_t));
- const float vfpacc = (float) vacc * vscale;
- long vrndacc = lrintf(vfpacc);
- vrndacc = XNN_UNPREDICTABLE(vrndacc < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc;
- vrndacc = XNN_UNPREDICTABLE(vrndacc > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc;
- int32_t vout = (int32_t) vrndacc + voutput_zero_point;
+ float vfpacc = (float) vacc * vscale;
+ vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
+ vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
+ vfpacc += vmagic_bias;
+ int32_t vout = (int32_t) fp32_to_bits(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (int8_t) vout;
} while (--c != 0);
diff --git a/src/qs8-dwconv/gen/up1x9-minmax-fp32-scalar-lrint.c b/src/qs8-dwconv/gen/up1x9-minmax-fp32-scalar-lrintf.c
similarity index 91%
rename from src/qs8-dwconv/gen/up1x9-minmax-fp32-scalar-lrint.c
rename to src/qs8-dwconv/gen/up1x9-minmax-fp32-scalar-lrintf.c
index 49d1ec1..9aaa350 100644
--- a/src/qs8-dwconv/gen/up1x9-minmax-fp32-scalar-lrint.c
+++ b/src/qs8-dwconv/gen/up1x9-minmax-fp32-scalar-lrintf.c
@@ -14,7 +14,7 @@
#include <xnnpack/math.h>
-void xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrint(
+void xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrintf(
size_t channels,
size_t output_width,
const int8_t** input,
@@ -29,10 +29,10 @@
assert(channels != 0);
assert(output_width != 0);
- const float vscale = params->fp32_scalar_lrint.scale;
- const long voutput_min_less_zero_point = params->fp32_scalar_lrint.output_min_less_zero_point;
- const long voutput_max_less_zero_point = params->fp32_scalar_lrint.output_max_less_zero_point;
- const int32_t voutput_zero_point = params->fp32_scalar_lrint.output_zero_point;
+ const float vscale = params->fp32_scalar_lrintf.scale;
+ const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
+ const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
do {
const int8_t* i0 = input[0];
assert(i0 != NULL);
diff --git a/src/qs8-dwconv/gen/up2x25-minmax-fp32-scalar-lrint.c b/src/qs8-dwconv/gen/up2x25-minmax-fp32-scalar-fmagic.c
similarity index 92%
copy from src/qs8-dwconv/gen/up2x25-minmax-fp32-scalar-lrint.c
copy to src/qs8-dwconv/gen/up2x25-minmax-fp32-scalar-fmagic.c
index 39a6202..f37dd5c 100644
--- a/src/qs8-dwconv/gen/up2x25-minmax-fp32-scalar-lrint.c
+++ b/src/qs8-dwconv/gen/up2x25-minmax-fp32-scalar-fmagic.c
@@ -8,13 +8,14 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-#include <math.h>
+
+#include <fp16.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
-void xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrint(
+void xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_fmagic(
size_t channels,
size_t output_width,
const int8_t** input,
@@ -29,10 +30,11 @@
assert(channels != 0);
assert(output_width != 0);
- const float vscale = params->fp32_scalar_lrint.scale;
- const long voutput_min_less_zero_point = params->fp32_scalar_lrint.output_min_less_zero_point;
- const long voutput_max_less_zero_point = params->fp32_scalar_lrint.output_max_less_zero_point;
- const int32_t voutput_zero_point = params->fp32_scalar_lrint.output_zero_point;
+ const float vscale = params->fp32_scalar_fmagic.scale;
+ const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
+ const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
+ const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
do {
const int8_t* i0 = input[0];
assert(i0 != NULL);
@@ -426,17 +428,17 @@
vfpacc0 *= vscale;
vfpacc1 *= vscale;
- long vrndacc0 = lrintf(vfpacc0);
- long vrndacc1 = lrintf(vfpacc1);
+ vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
+ vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
- vrndacc0 = XNN_UNPREDICTABLE(vrndacc0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0;
- vrndacc1 = XNN_UNPREDICTABLE(vrndacc1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1;
+ vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
+ vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
- vrndacc0 = XNN_UNPREDICTABLE(vrndacc0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0;
- vrndacc1 = XNN_UNPREDICTABLE(vrndacc1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1;
+ vfpacc0 += vmagic_bias;
+ vfpacc1 += vmagic_bias;
- int32_t vout0 = (int32_t) vrndacc0 + voutput_zero_point;
- int32_t vout1 = (int32_t) vrndacc1 + voutput_zero_point;
+ int32_t vout0 = (int32_t) fp32_to_bits(vfpacc0) - vmagic_bias_less_output_zero_point;
+ int32_t vout1 = (int32_t) fp32_to_bits(vfpacc1) - vmagic_bias_less_output_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
@@ -521,11 +523,11 @@
const int32_t vk24 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[48];
vacc += vi24 * vk24;
- const float vfpacc = (float) vacc * vscale;
- long vrndacc = lrintf(vfpacc);
- vrndacc = XNN_UNPREDICTABLE(vrndacc < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc;
- vrndacc = XNN_UNPREDICTABLE(vrndacc > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc;
- int32_t vout = (int32_t) vrndacc + voutput_zero_point;
+ float vfpacc = (float) vacc * vscale;
+ vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
+ vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
+ vfpacc += vmagic_bias;
+ int32_t vout = (int32_t) fp32_to_bits(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (int8_t) vout;
}
diff --git a/src/qs8-dwconv/gen/up2x25-minmax-fp32-scalar-lrint.c b/src/qs8-dwconv/gen/up2x25-minmax-fp32-scalar-lrintf.c
similarity index 93%
rename from src/qs8-dwconv/gen/up2x25-minmax-fp32-scalar-lrint.c
rename to src/qs8-dwconv/gen/up2x25-minmax-fp32-scalar-lrintf.c
index 39a6202..2e9c6c0 100644
--- a/src/qs8-dwconv/gen/up2x25-minmax-fp32-scalar-lrint.c
+++ b/src/qs8-dwconv/gen/up2x25-minmax-fp32-scalar-lrintf.c
@@ -14,7 +14,7 @@
#include <xnnpack/math.h>
-void xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrint(
+void xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrintf(
size_t channels,
size_t output_width,
const int8_t** input,
@@ -29,10 +29,10 @@
assert(channels != 0);
assert(output_width != 0);
- const float vscale = params->fp32_scalar_lrint.scale;
- const long voutput_min_less_zero_point = params->fp32_scalar_lrint.output_min_less_zero_point;
- const long voutput_max_less_zero_point = params->fp32_scalar_lrint.output_max_less_zero_point;
- const int32_t voutput_zero_point = params->fp32_scalar_lrint.output_zero_point;
+ const float vscale = params->fp32_scalar_lrintf.scale;
+ const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
+ const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
do {
const int8_t* i0 = input[0];
assert(i0 != NULL);
@@ -426,14 +426,14 @@
vfpacc0 *= vscale;
vfpacc1 *= vscale;
- long vrndacc0 = lrintf(vfpacc0);
- long vrndacc1 = lrintf(vfpacc1);
+ vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
+ vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
- vrndacc0 = XNN_UNPREDICTABLE(vrndacc0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0;
- vrndacc1 = XNN_UNPREDICTABLE(vrndacc1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1;
+ vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
+ vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
- vrndacc0 = XNN_UNPREDICTABLE(vrndacc0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0;
- vrndacc1 = XNN_UNPREDICTABLE(vrndacc1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1;
+ const int32_t vrndacc0 = (int32_t) lrintf(vfpacc0);
+ const int32_t vrndacc1 = (int32_t) lrintf(vfpacc1);
int32_t vout0 = (int32_t) vrndacc0 + voutput_zero_point;
int32_t vout1 = (int32_t) vrndacc1 + voutput_zero_point;
@@ -521,11 +521,11 @@
const int32_t vk24 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[48];
vacc += vi24 * vk24;
- const float vfpacc = (float) vacc * vscale;
- long vrndacc = lrintf(vfpacc);
- vrndacc = XNN_UNPREDICTABLE(vrndacc < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc;
- vrndacc = XNN_UNPREDICTABLE(vrndacc > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc;
- int32_t vout = (int32_t) vrndacc + voutput_zero_point;
+ float vfpacc = (float) vacc * vscale;
+ vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
+ vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
+ const int32_t vrndacc = (int32_t) lrintf(vfpacc);
+ int32_t vout = vrndacc + voutput_zero_point;
*output++ = (int8_t) vout;
}
diff --git a/src/qs8-dwconv/gen/up2x9-minmax-fp32-scalar-lrint.c b/src/qs8-dwconv/gen/up2x9-minmax-fp32-scalar-fmagic.c
similarity index 83%
copy from src/qs8-dwconv/gen/up2x9-minmax-fp32-scalar-lrint.c
copy to src/qs8-dwconv/gen/up2x9-minmax-fp32-scalar-fmagic.c
index 59c0f62..1957658 100644
--- a/src/qs8-dwconv/gen/up2x9-minmax-fp32-scalar-lrint.c
+++ b/src/qs8-dwconv/gen/up2x9-minmax-fp32-scalar-fmagic.c
@@ -8,13 +8,14 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-#include <math.h>
+
+#include <fp16.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
-void xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint(
+void xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic(
size_t channels,
size_t output_width,
const int8_t** input,
@@ -29,10 +30,11 @@
assert(channels != 0);
assert(output_width != 0);
- const float vscale = params->fp32_scalar_lrint.scale;
- const long voutput_min_less_zero_point = params->fp32_scalar_lrint.output_min_less_zero_point;
- const long voutput_max_less_zero_point = params->fp32_scalar_lrint.output_max_less_zero_point;
- const int32_t voutput_zero_point = params->fp32_scalar_lrint.output_zero_point;
+ const float vscale = params->fp32_scalar_fmagic.scale;
+ const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
+ const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
+ const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
do {
const int8_t* i0 = input[0];
assert(i0 != NULL);
@@ -186,17 +188,17 @@
vfpacc0 *= vscale;
vfpacc1 *= vscale;
- long vrndacc0 = lrintf(vfpacc0);
- long vrndacc1 = lrintf(vfpacc1);
+ vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
+ vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
- vrndacc0 = XNN_UNPREDICTABLE(vrndacc0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0;
- vrndacc1 = XNN_UNPREDICTABLE(vrndacc1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1;
+ vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
+ vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
- vrndacc0 = XNN_UNPREDICTABLE(vrndacc0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0;
- vrndacc1 = XNN_UNPREDICTABLE(vrndacc1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1;
+ vfpacc0 += vmagic_bias;
+ vfpacc1 += vmagic_bias;
- int32_t vout0 = (int32_t) vrndacc0 + voutput_zero_point;
- int32_t vout1 = (int32_t) vrndacc1 + voutput_zero_point;
+ int32_t vout0 = (int32_t) fp32_to_bits(vfpacc0) - vmagic_bias_less_output_zero_point;
+ int32_t vout1 = (int32_t) fp32_to_bits(vfpacc1) - vmagic_bias_less_output_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
@@ -233,11 +235,11 @@
const int32_t vk8 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[16];
vacc += vi8 * vk8;
- const float vfpacc = (float) vacc * vscale;
- long vrndacc = lrintf(vfpacc);
- vrndacc = XNN_UNPREDICTABLE(vrndacc < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc;
- vrndacc = XNN_UNPREDICTABLE(vrndacc > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc;
- int32_t vout = (int32_t) vrndacc + voutput_zero_point;
+ float vfpacc = (float) vacc * vscale;
+ vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
+ vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
+ vfpacc += vmagic_bias;
+ int32_t vout = (int32_t) fp32_to_bits(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (int8_t) vout;
}
diff --git a/src/qs8-dwconv/gen/up2x9-minmax-fp32-scalar-lrint.c b/src/qs8-dwconv/gen/up2x9-minmax-fp32-scalar-lrintf.c
similarity index 85%
rename from src/qs8-dwconv/gen/up2x9-minmax-fp32-scalar-lrint.c
rename to src/qs8-dwconv/gen/up2x9-minmax-fp32-scalar-lrintf.c
index 59c0f62..7537282 100644
--- a/src/qs8-dwconv/gen/up2x9-minmax-fp32-scalar-lrint.c
+++ b/src/qs8-dwconv/gen/up2x9-minmax-fp32-scalar-lrintf.c
@@ -14,7 +14,7 @@
#include <xnnpack/math.h>
-void xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint(
+void xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrintf(
size_t channels,
size_t output_width,
const int8_t** input,
@@ -29,10 +29,10 @@
assert(channels != 0);
assert(output_width != 0);
- const float vscale = params->fp32_scalar_lrint.scale;
- const long voutput_min_less_zero_point = params->fp32_scalar_lrint.output_min_less_zero_point;
- const long voutput_max_less_zero_point = params->fp32_scalar_lrint.output_max_less_zero_point;
- const int32_t voutput_zero_point = params->fp32_scalar_lrint.output_zero_point;
+ const float vscale = params->fp32_scalar_lrintf.scale;
+ const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
+ const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
do {
const int8_t* i0 = input[0];
assert(i0 != NULL);
@@ -186,14 +186,14 @@
vfpacc0 *= vscale;
vfpacc1 *= vscale;
- long vrndacc0 = lrintf(vfpacc0);
- long vrndacc1 = lrintf(vfpacc1);
+ vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
+ vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
- vrndacc0 = XNN_UNPREDICTABLE(vrndacc0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0;
- vrndacc1 = XNN_UNPREDICTABLE(vrndacc1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1;
+ vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
+ vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
- vrndacc0 = XNN_UNPREDICTABLE(vrndacc0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0;
- vrndacc1 = XNN_UNPREDICTABLE(vrndacc1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1;
+ const int32_t vrndacc0 = (int32_t) lrintf(vfpacc0);
+ const int32_t vrndacc1 = (int32_t) lrintf(vfpacc1);
int32_t vout0 = (int32_t) vrndacc0 + voutput_zero_point;
int32_t vout1 = (int32_t) vrndacc1 + voutput_zero_point;
@@ -233,11 +233,11 @@
const int32_t vk8 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[16];
vacc += vi8 * vk8;
- const float vfpacc = (float) vacc * vscale;
- long vrndacc = lrintf(vfpacc);
- vrndacc = XNN_UNPREDICTABLE(vrndacc < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc;
- vrndacc = XNN_UNPREDICTABLE(vrndacc > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc;
- int32_t vout = (int32_t) vrndacc + voutput_zero_point;
+ float vfpacc = (float) vacc * vscale;
+ vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
+ vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
+ const int32_t vrndacc = (int32_t) lrintf(vfpacc);
+ int32_t vout = vrndacc + voutput_zero_point;
*output++ = (int8_t) vout;
}
diff --git a/src/qs8-dwconv/gen/up4x25-minmax-fp32-scalar-lrint.c b/src/qs8-dwconv/gen/up4x25-minmax-fp32-scalar-fmagic.c
similarity index 92%
copy from src/qs8-dwconv/gen/up4x25-minmax-fp32-scalar-lrint.c
copy to src/qs8-dwconv/gen/up4x25-minmax-fp32-scalar-fmagic.c
index 433dcfc..551ef27 100644
--- a/src/qs8-dwconv/gen/up4x25-minmax-fp32-scalar-lrint.c
+++ b/src/qs8-dwconv/gen/up4x25-minmax-fp32-scalar-fmagic.c
@@ -8,13 +8,14 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-#include <math.h>
+
+#include <fp16.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
-void xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrint(
+void xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_fmagic(
size_t channels,
size_t output_width,
const int8_t** input,
@@ -29,10 +30,11 @@
assert(channels != 0);
assert(output_width != 0);
- const float vscale = params->fp32_scalar_lrint.scale;
- const long voutput_min_less_zero_point = params->fp32_scalar_lrint.output_min_less_zero_point;
- const long voutput_max_less_zero_point = params->fp32_scalar_lrint.output_max_less_zero_point;
- const int32_t voutput_zero_point = params->fp32_scalar_lrint.output_zero_point;
+ const float vscale = params->fp32_scalar_fmagic.scale;
+ const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
+ const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
+ const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
do {
const int8_t* i0 = input[0];
assert(i0 != NULL);
@@ -582,25 +584,25 @@
vfpacc2 *= vscale;
vfpacc3 *= vscale;
- long vrndacc0 = lrintf(vfpacc0);
- long vrndacc1 = lrintf(vfpacc1);
- long vrndacc2 = lrintf(vfpacc2);
- long vrndacc3 = lrintf(vfpacc3);
+ vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
+ vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
+ vfpacc2 = math_max_f32(vfpacc2, voutput_min_less_zero_point);
+ vfpacc3 = math_max_f32(vfpacc3, voutput_min_less_zero_point);
- vrndacc0 = XNN_UNPREDICTABLE(vrndacc0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0;
- vrndacc1 = XNN_UNPREDICTABLE(vrndacc1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1;
- vrndacc2 = XNN_UNPREDICTABLE(vrndacc2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2;
- vrndacc3 = XNN_UNPREDICTABLE(vrndacc3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc3;
+ vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
+ vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
+ vfpacc2 = math_min_f32(vfpacc2, voutput_max_less_zero_point);
+ vfpacc3 = math_min_f32(vfpacc3, voutput_max_less_zero_point);
- vrndacc0 = XNN_UNPREDICTABLE(vrndacc0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0;
- vrndacc1 = XNN_UNPREDICTABLE(vrndacc1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1;
- vrndacc2 = XNN_UNPREDICTABLE(vrndacc2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2;
- vrndacc3 = XNN_UNPREDICTABLE(vrndacc3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc3;
+ vfpacc0 += vmagic_bias;
+ vfpacc1 += vmagic_bias;
+ vfpacc2 += vmagic_bias;
+ vfpacc3 += vmagic_bias;
- int32_t vout0 = (int32_t) vrndacc0 + voutput_zero_point;
- int32_t vout1 = (int32_t) vrndacc1 + voutput_zero_point;
- int32_t vout2 = (int32_t) vrndacc2 + voutput_zero_point;
- int32_t vout3 = (int32_t) vrndacc3 + voutput_zero_point;
+ int32_t vout0 = (int32_t) fp32_to_bits(vfpacc0) - vmagic_bias_less_output_zero_point;
+ int32_t vout1 = (int32_t) fp32_to_bits(vfpacc1) - vmagic_bias_less_output_zero_point;
+ int32_t vout2 = (int32_t) fp32_to_bits(vfpacc2) - vmagic_bias_less_output_zero_point;
+ int32_t vout3 = (int32_t) fp32_to_bits(vfpacc3) - vmagic_bias_less_output_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
@@ -691,11 +693,11 @@
vacc += vi24 * vk24;
k += 1;
- const float vfpacc = (float) vacc * vscale;
- long vrndacc = lrintf(vfpacc);
- vrndacc = XNN_UNPREDICTABLE(vrndacc < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc;
- vrndacc = XNN_UNPREDICTABLE(vrndacc > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc;
- int32_t vout = (int32_t) vrndacc + voutput_zero_point;
+ float vfpacc = (float) vacc * vscale;
+ vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
+ vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
+ vfpacc += vmagic_bias;
+ int32_t vout = (int32_t) fp32_to_bits(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (int8_t) vout;
} while (--c != 0);
diff --git a/src/qs8-dwconv/gen/up4x25-minmax-fp32-scalar-lrint.c b/src/qs8-dwconv/gen/up4x25-minmax-fp32-scalar-lrintf.c
similarity index 93%
rename from src/qs8-dwconv/gen/up4x25-minmax-fp32-scalar-lrint.c
rename to src/qs8-dwconv/gen/up4x25-minmax-fp32-scalar-lrintf.c
index 433dcfc..3807d9a 100644
--- a/src/qs8-dwconv/gen/up4x25-minmax-fp32-scalar-lrint.c
+++ b/src/qs8-dwconv/gen/up4x25-minmax-fp32-scalar-lrintf.c
@@ -14,7 +14,7 @@
#include <xnnpack/math.h>
-void xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrint(
+void xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrintf(
size_t channels,
size_t output_width,
const int8_t** input,
@@ -29,10 +29,10 @@
assert(channels != 0);
assert(output_width != 0);
- const float vscale = params->fp32_scalar_lrint.scale;
- const long voutput_min_less_zero_point = params->fp32_scalar_lrint.output_min_less_zero_point;
- const long voutput_max_less_zero_point = params->fp32_scalar_lrint.output_max_less_zero_point;
- const int32_t voutput_zero_point = params->fp32_scalar_lrint.output_zero_point;
+ const float vscale = params->fp32_scalar_lrintf.scale;
+ const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
+ const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
do {
const int8_t* i0 = input[0];
assert(i0 != NULL);
@@ -582,20 +582,20 @@
vfpacc2 *= vscale;
vfpacc3 *= vscale;
- long vrndacc0 = lrintf(vfpacc0);
- long vrndacc1 = lrintf(vfpacc1);
- long vrndacc2 = lrintf(vfpacc2);
- long vrndacc3 = lrintf(vfpacc3);
+ vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
+ vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
+ vfpacc2 = math_max_f32(vfpacc2, voutput_min_less_zero_point);
+ vfpacc3 = math_max_f32(vfpacc3, voutput_min_less_zero_point);
- vrndacc0 = XNN_UNPREDICTABLE(vrndacc0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0;
- vrndacc1 = XNN_UNPREDICTABLE(vrndacc1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1;
- vrndacc2 = XNN_UNPREDICTABLE(vrndacc2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2;
- vrndacc3 = XNN_UNPREDICTABLE(vrndacc3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc3;
+ vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
+ vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
+ vfpacc2 = math_min_f32(vfpacc2, voutput_max_less_zero_point);
+ vfpacc3 = math_min_f32(vfpacc3, voutput_max_less_zero_point);
- vrndacc0 = XNN_UNPREDICTABLE(vrndacc0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0;
- vrndacc1 = XNN_UNPREDICTABLE(vrndacc1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1;
- vrndacc2 = XNN_UNPREDICTABLE(vrndacc2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2;
- vrndacc3 = XNN_UNPREDICTABLE(vrndacc3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc3;
+ const int32_t vrndacc0 = (int32_t) lrintf(vfpacc0);
+ const int32_t vrndacc1 = (int32_t) lrintf(vfpacc1);
+ const int32_t vrndacc2 = (int32_t) lrintf(vfpacc2);
+ const int32_t vrndacc3 = (int32_t) lrintf(vfpacc3);
int32_t vout0 = (int32_t) vrndacc0 + voutput_zero_point;
int32_t vout1 = (int32_t) vrndacc1 + voutput_zero_point;
@@ -691,11 +691,11 @@
vacc += vi24 * vk24;
k += 1;
- const float vfpacc = (float) vacc * vscale;
- long vrndacc = lrintf(vfpacc);
- vrndacc = XNN_UNPREDICTABLE(vrndacc < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc;
- vrndacc = XNN_UNPREDICTABLE(vrndacc > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc;
- int32_t vout = (int32_t) vrndacc + voutput_zero_point;
+ float vfpacc = (float) vacc * vscale;
+ vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
+ vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
+ const int32_t vrndacc = (int32_t) lrintf(vfpacc);
+ int32_t vout = vrndacc + voutput_zero_point;
*output++ = (int8_t) vout;
} while (--c != 0);
diff --git a/src/qs8-dwconv/gen/up4x9-minmax-fp32-scalar-lrint.c b/src/qs8-dwconv/gen/up4x9-minmax-fp32-scalar-fmagic.c
similarity index 83%
copy from src/qs8-dwconv/gen/up4x9-minmax-fp32-scalar-lrint.c
copy to src/qs8-dwconv/gen/up4x9-minmax-fp32-scalar-fmagic.c
index 8dd81d3..645dd17 100644
--- a/src/qs8-dwconv/gen/up4x9-minmax-fp32-scalar-lrint.c
+++ b/src/qs8-dwconv/gen/up4x9-minmax-fp32-scalar-fmagic.c
@@ -8,13 +8,14 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-#include <math.h>
+
+#include <fp16.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
-void xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint(
+void xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_fmagic(
size_t channels,
size_t output_width,
const int8_t** input,
@@ -29,10 +30,11 @@
assert(channels != 0);
assert(output_width != 0);
- const float vscale = params->fp32_scalar_lrint.scale;
- const long voutput_min_less_zero_point = params->fp32_scalar_lrint.output_min_less_zero_point;
- const long voutput_max_less_zero_point = params->fp32_scalar_lrint.output_max_less_zero_point;
- const int32_t voutput_zero_point = params->fp32_scalar_lrint.output_zero_point;
+ const float vscale = params->fp32_scalar_fmagic.scale;
+ const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
+ const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
+ const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
do {
const int8_t* i0 = input[0];
assert(i0 != NULL);
@@ -246,25 +248,25 @@
vfpacc2 *= vscale;
vfpacc3 *= vscale;
- long vrndacc0 = lrintf(vfpacc0);
- long vrndacc1 = lrintf(vfpacc1);
- long vrndacc2 = lrintf(vfpacc2);
- long vrndacc3 = lrintf(vfpacc3);
+ vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
+ vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
+ vfpacc2 = math_max_f32(vfpacc2, voutput_min_less_zero_point);
+ vfpacc3 = math_max_f32(vfpacc3, voutput_min_less_zero_point);
- vrndacc0 = XNN_UNPREDICTABLE(vrndacc0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0;
- vrndacc1 = XNN_UNPREDICTABLE(vrndacc1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1;
- vrndacc2 = XNN_UNPREDICTABLE(vrndacc2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2;
- vrndacc3 = XNN_UNPREDICTABLE(vrndacc3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc3;
+ vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
+ vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
+ vfpacc2 = math_min_f32(vfpacc2, voutput_max_less_zero_point);
+ vfpacc3 = math_min_f32(vfpacc3, voutput_max_less_zero_point);
- vrndacc0 = XNN_UNPREDICTABLE(vrndacc0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0;
- vrndacc1 = XNN_UNPREDICTABLE(vrndacc1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1;
- vrndacc2 = XNN_UNPREDICTABLE(vrndacc2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2;
- vrndacc3 = XNN_UNPREDICTABLE(vrndacc3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc3;
+ vfpacc0 += vmagic_bias;
+ vfpacc1 += vmagic_bias;
+ vfpacc2 += vmagic_bias;
+ vfpacc3 += vmagic_bias;
- int32_t vout0 = (int32_t) vrndacc0 + voutput_zero_point;
- int32_t vout1 = (int32_t) vrndacc1 + voutput_zero_point;
- int32_t vout2 = (int32_t) vrndacc2 + voutput_zero_point;
- int32_t vout3 = (int32_t) vrndacc3 + voutput_zero_point;
+ int32_t vout0 = (int32_t) fp32_to_bits(vfpacc0) - vmagic_bias_less_output_zero_point;
+ int32_t vout1 = (int32_t) fp32_to_bits(vfpacc1) - vmagic_bias_less_output_zero_point;
+ int32_t vout2 = (int32_t) fp32_to_bits(vfpacc2) - vmagic_bias_less_output_zero_point;
+ int32_t vout3 = (int32_t) fp32_to_bits(vfpacc3) - vmagic_bias_less_output_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
@@ -307,11 +309,11 @@
vacc += vi8 * vk8;
k += 1;
- const float vfpacc = (float) vacc * vscale;
- long vrndacc = lrintf(vfpacc);
- vrndacc = XNN_UNPREDICTABLE(vrndacc < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc;
- vrndacc = XNN_UNPREDICTABLE(vrndacc > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc;
- int32_t vout = (int32_t) vrndacc + voutput_zero_point;
+ float vfpacc = (float) vacc * vscale;
+ vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
+ vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
+ vfpacc += vmagic_bias;
+ int32_t vout = (int32_t) fp32_to_bits(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (int8_t) vout;
} while (--c != 0);
diff --git a/src/qs8-dwconv/gen/up4x9-minmax-fp32-scalar-lrint.c b/src/qs8-dwconv/gen/up4x9-minmax-fp32-scalar-lrintf.c
similarity index 85%
rename from src/qs8-dwconv/gen/up4x9-minmax-fp32-scalar-lrint.c
rename to src/qs8-dwconv/gen/up4x9-minmax-fp32-scalar-lrintf.c
index 8dd81d3..4d62706 100644
--- a/src/qs8-dwconv/gen/up4x9-minmax-fp32-scalar-lrint.c
+++ b/src/qs8-dwconv/gen/up4x9-minmax-fp32-scalar-lrintf.c
@@ -14,7 +14,7 @@
#include <xnnpack/math.h>
-void xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint(
+void xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrintf(
size_t channels,
size_t output_width,
const int8_t** input,
@@ -29,10 +29,10 @@
assert(channels != 0);
assert(output_width != 0);
- const float vscale = params->fp32_scalar_lrint.scale;
- const long voutput_min_less_zero_point = params->fp32_scalar_lrint.output_min_less_zero_point;
- const long voutput_max_less_zero_point = params->fp32_scalar_lrint.output_max_less_zero_point;
- const int32_t voutput_zero_point = params->fp32_scalar_lrint.output_zero_point;
+ const float vscale = params->fp32_scalar_lrintf.scale;
+ const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
+ const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
do {
const int8_t* i0 = input[0];
assert(i0 != NULL);
@@ -246,20 +246,20 @@
vfpacc2 *= vscale;
vfpacc3 *= vscale;
- long vrndacc0 = lrintf(vfpacc0);
- long vrndacc1 = lrintf(vfpacc1);
- long vrndacc2 = lrintf(vfpacc2);
- long vrndacc3 = lrintf(vfpacc3);
+ vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
+ vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
+ vfpacc2 = math_max_f32(vfpacc2, voutput_min_less_zero_point);
+ vfpacc3 = math_max_f32(vfpacc3, voutput_min_less_zero_point);
- vrndacc0 = XNN_UNPREDICTABLE(vrndacc0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0;
- vrndacc1 = XNN_UNPREDICTABLE(vrndacc1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1;
- vrndacc2 = XNN_UNPREDICTABLE(vrndacc2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2;
- vrndacc3 = XNN_UNPREDICTABLE(vrndacc3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc3;
+ vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
+ vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
+ vfpacc2 = math_min_f32(vfpacc2, voutput_max_less_zero_point);
+ vfpacc3 = math_min_f32(vfpacc3, voutput_max_less_zero_point);
- vrndacc0 = XNN_UNPREDICTABLE(vrndacc0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0;
- vrndacc1 = XNN_UNPREDICTABLE(vrndacc1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1;
- vrndacc2 = XNN_UNPREDICTABLE(vrndacc2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2;
- vrndacc3 = XNN_UNPREDICTABLE(vrndacc3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc3;
+ const int32_t vrndacc0 = (int32_t) lrintf(vfpacc0);
+ const int32_t vrndacc1 = (int32_t) lrintf(vfpacc1);
+ const int32_t vrndacc2 = (int32_t) lrintf(vfpacc2);
+ const int32_t vrndacc3 = (int32_t) lrintf(vfpacc3);
int32_t vout0 = (int32_t) vrndacc0 + voutput_zero_point;
int32_t vout1 = (int32_t) vrndacc1 + voutput_zero_point;
@@ -307,11 +307,11 @@
vacc += vi8 * vk8;
k += 1;
- const float vfpacc = (float) vacc * vscale;
- long vrndacc = lrintf(vfpacc);
- vrndacc = XNN_UNPREDICTABLE(vrndacc < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc;
- vrndacc = XNN_UNPREDICTABLE(vrndacc > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc;
- int32_t vout = (int32_t) vrndacc + voutput_zero_point;
+ float vfpacc = (float) vacc * vscale;
+ vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
+ vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
+ const int32_t vrndacc = (int32_t) lrintf(vfpacc);
+ int32_t vout = vrndacc + voutput_zero_point;
*output++ = (int8_t) vout;
} while (--c != 0);
diff --git a/src/qs8-dwconv/unipass-scalar.c.in b/src/qs8-dwconv/unipass-scalar.c.in
index 5d0173f..c06c20a 100644
--- a/src/qs8-dwconv/unipass-scalar.c.in
+++ b/src/qs8-dwconv/unipass-scalar.c.in
@@ -5,11 +5,12 @@
$assert KERNEL_TILE >= 2
$assert REQUANTIZATION == "FP32"
+$assert VARIANT in ["FMAGIC", "LRINTF"]
$assert DATATYPE in ["QC8", "QS8", "QU8"]
#include <assert.h>
-$if VARIANT == "LRINT":
+$if VARIANT == "LRINTF":
#include <math.h>
-$elif VARIANT == "MAGIC":
+$elif VARIANT == "FMAGIC":
#include <fp16.h>
@@ -38,11 +39,11 @@
$if DATATYPE != "QC8":
const float vscale = params->${PARAMS_STRUCT}.scale;
- $if VARIANT == "LRINT":
- const long voutput_min_less_zero_point = params->${PARAMS_STRUCT}.output_min_less_zero_point;
- const long voutput_max_less_zero_point = params->${PARAMS_STRUCT}.output_max_less_zero_point;
+ $if VARIANT == "LRINTF":
+ const float voutput_min_less_zero_point = params->${PARAMS_STRUCT}.output_min_less_zero_point;
+ const float voutput_max_less_zero_point = params->${PARAMS_STRUCT}.output_max_less_zero_point;
const int32_t voutput_zero_point = params->${PARAMS_STRUCT}.output_zero_point;
- $elif VARIANT == "MAGIC":
+ $elif VARIANT == "FMAGIC":
const float voutput_min_less_zero_point = params->${PARAMS_STRUCT}.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->${PARAMS_STRUCT}.output_max_less_zero_point;
const float vmagic_bias = params->${PARAMS_STRUCT}.magic_bias;
@@ -85,13 +86,13 @@
$else:
const float vscale = *((const float*) w);
w = (const void*) ((const float*) w + 1);
- $if VARIANT == "LRINT":
+ $if VARIANT == "LRINTF":
const float vfpacc = (float) vacc * vscale;
long vrndacc = lrintf(vfpacc);
vrndacc = XNN_UNPREDICTABLE(vrndacc < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc;
vrndacc = XNN_UNPREDICTABLE(vrndacc > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc;
int32_t vout = (int32_t) vrndacc + voutput_zero_point;
- $elif VARIANT == "MAGIC":
+ $elif VARIANT == "FMAGIC":
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
@@ -144,19 +145,19 @@
$for C in range(CHANNEL_TILE):
vfpacc${C} *= vscale;
- $if VARIANT == "LRINT":
+ $if VARIANT == "LRINTF":
$for C in range(CHANNEL_TILE):
- long vrndacc${C} = lrintf(vfpacc${C});
+ vfpacc${C} = math_max_f32(vfpacc${C}, voutput_min_less_zero_point);
$for C in range(CHANNEL_TILE):
- vrndacc${C} = XNN_UNPREDICTABLE(vrndacc${C} < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc${C};
+ vfpacc${C} = math_min_f32(vfpacc${C}, voutput_max_less_zero_point);
$for C in range(CHANNEL_TILE):
- vrndacc${C} = XNN_UNPREDICTABLE(vrndacc${C} > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc${C};
+ const int32_t vrndacc${C} = (int32_t) lrintf(vfpacc${C});
$for C in range(CHANNEL_TILE):
int32_t vout${C} = (int32_t) vrndacc${C} + voutput_zero_point;
- $elif VARIANT == "MAGIC":
+ $elif VARIANT == "FMAGIC":
$for C in range(CHANNEL_TILE):
vfpacc${C} = math_max_f32(vfpacc${C}, voutput_min_less_zero_point);
@@ -194,13 +195,13 @@
const float vscale = *((const unaligned_float*) ((uintptr_t) w + ${CHANNEL_TILE} * sizeof(int32_t) + ${KERNEL_TILE * CHANNEL_TILE} * sizeof(${XINT8_T})));
$else:
const float vscale = *((const float*) ((uintptr_t) w + ${CHANNEL_TILE} * sizeof(int32_t) + ${KERNEL_TILE * CHANNEL_TILE} * sizeof(${XINT8_T})));
- $if VARIANT == "LRINT":
- const float vfpacc = (float) vacc * vscale;
- long vrndacc = lrintf(vfpacc);
- vrndacc = XNN_UNPREDICTABLE(vrndacc < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc;
- vrndacc = XNN_UNPREDICTABLE(vrndacc > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc;
- int32_t vout = (int32_t) vrndacc + voutput_zero_point;
- $elif VARIANT == "MAGIC":
+ $if VARIANT == "LRINTF":
+ float vfpacc = (float) vacc * vscale;
+ vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
+ vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
+ const int32_t vrndacc = (int32_t) lrintf(vfpacc);
+ int32_t vout = vrndacc + voutput_zero_point;
+ $elif VARIANT == "FMAGIC":
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
@@ -232,13 +233,13 @@
const float vscale = *((const unaligned_float*) ((uintptr_t) w + ${CHANNEL_TILE - 1} * sizeof(int32_t) + ${KERNEL_TILE * CHANNEL_TILE} * sizeof(${XINT8_T})));
$else:
const float vscale = *((const float*) ((uintptr_t) w + ${CHANNEL_TILE - 1} * sizeof(int32_t) + ${KERNEL_TILE * CHANNEL_TILE} * sizeof(${XINT8_T})));
- $if VARIANT == "LRINT":
- const float vfpacc = (float) vacc * vscale;
- long vrndacc = lrintf(vfpacc);
- vrndacc = XNN_UNPREDICTABLE(vrndacc < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc;
- vrndacc = XNN_UNPREDICTABLE(vrndacc > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc;
- int32_t vout = (int32_t) vrndacc + voutput_zero_point;
- $elif VARIANT == "MAGIC":
+ $if VARIANT == "LRINTF":
+ float vfpacc = (float) vacc * vscale;
+ vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
+ vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
+ const int32_t vrndacc = (int32_t) lrintf(vfpacc);
+ int32_t vout = vrndacc + voutput_zero_point;
+ $elif VARIANT == "FMAGIC":
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
diff --git a/src/qs8-gemm/gen/1x2-minmax-fp32-scalar-magic.c b/src/qs8-gemm/gen/1x2-minmax-fp32-scalar-fmagic.c
similarity index 88%
rename from src/qs8-gemm/gen/1x2-minmax-fp32-scalar-magic.c
rename to src/qs8-gemm/gen/1x2-minmax-fp32-scalar-fmagic.c
index 982c058..d3032f4 100644
--- a/src/qs8-gemm/gen/1x2-minmax-fp32-scalar-magic.c
+++ b/src/qs8-gemm/gen/1x2-minmax-fp32-scalar-fmagic.c
@@ -15,7 +15,7 @@
#include <xnnpack/gemm.h>
-void xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_magic(
+void xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic(
size_t mr,
size_t nc,
size_t kc,
@@ -57,23 +57,23 @@
float vfpacc0x0 = (float) vacc0x0;
float vfpacc0x1 = (float) vacc0x1;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_fmagic.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
+ const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
+ const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
diff --git a/src/qs8-gemm/gen/1x2-minmax-fp32-scalar-lrint.c b/src/qs8-gemm/gen/1x2-minmax-fp32-scalar-lrint.c
deleted file mode 100644
index 817f598..0000000
--- a/src/qs8-gemm/gen/1x2-minmax-fp32-scalar-lrint.c
+++ /dev/null
@@ -1,95 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-gemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-#include <math.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint(
- size_t mr,
- size_t nc,
- size_t kc,
- const int8_t* restrict a,
- size_t a_stride,
- const void* restrict w,
- int8_t* restrict c,
- size_t cm_stride,
- size_t cn_stride,
- const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 1);
- assert(nc != 0);
- assert(kc != 0);
-
- const int8_t* a0 = a;
- int8_t* c0 = c;
-
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- w = (const void*) ((const int32_t*) w + 2);
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) *a0++;
-
- const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
- const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
- w = (const void*) ((const int8_t*) w + 2);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
-
- k -= sizeof(int8_t);
- } while (k != 0);
-
- float vfpacc0x0 = (float) vacc0x0;
- float vfpacc0x1 = (float) vacc0x1;
-
- const float vscale = params->fp32_scalar_lrint.scale;
- vfpacc0x0 *= vscale;
- vfpacc0x1 *= vscale;
-
- long vrndacc0x0 = lrintf(vfpacc0x0);
- long vrndacc0x1 = lrintf(vfpacc0x1);
-
- const long voutput_min_less_zero_point = params->fp32_scalar_lrint.output_min_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x1;
-
- const long voutput_max_less_zero_point = params->fp32_scalar_lrint.output_max_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x1;
-
- const int32_t voutput_zero_point = params->fp32_scalar_lrint.output_zero_point;
- int32_t vout0x0 = (int32_t) vrndacc0x0 + voutput_zero_point;
- int32_t vout0x1 = (int32_t) vrndacc0x1 + voutput_zero_point;
-
- if XNN_LIKELY(nc >= 2) {
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
-
- a0 = (const int8_t*) ((uintptr_t) a0 - kc);
-
- c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
-
- nc -= 2;
- } else {
- if (nc & 1) {
- c0[0] = (int8_t) vout0x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qs8-gemm/gen/1x2-minmax-fp32-scalar-magic.c b/src/qs8-gemm/gen/1x2-minmax-fp32-scalar-lrintf.c
similarity index 76%
copy from src/qs8-gemm/gen/1x2-minmax-fp32-scalar-magic.c
copy to src/qs8-gemm/gen/1x2-minmax-fp32-scalar-lrintf.c
index 982c058..d0cd407 100644
--- a/src/qs8-gemm/gen/1x2-minmax-fp32-scalar-magic.c
+++ b/src/qs8-gemm/gen/1x2-minmax-fp32-scalar-lrintf.c
@@ -8,14 +8,13 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-
-#include <fp16.h>
+#include <math.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
-void xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_magic(
+void xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
@@ -57,25 +56,24 @@
float vfpacc0x0 = (float) vacc0x0;
float vfpacc0x1 = (float) vacc0x1;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_lrintf.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
- vfpacc0x0 += vmagic_bias;
- vfpacc0x1 += vmagic_bias;
+ const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
+ const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
- int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
- int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
+ const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
+ int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
+ int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
if XNN_LIKELY(nc >= 2) {
c0[0] = (int8_t) vout0x0;
diff --git a/src/qs8-gemm/gen/1x2-minmax-rndnu-scalar.c b/src/qs8-gemm/gen/1x2-minmax-rndnu-scalar.c
deleted file mode 100644
index 12b43bb..0000000
--- a/src/qs8-gemm/gen/1x2-minmax-rndnu-scalar.c
+++ /dev/null
@@ -1,93 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-gemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qs8_gemm_minmax_rndnu_ukernel_1x2__scalar(
- size_t mr,
- size_t nc,
- size_t kc,
- const int8_t* restrict a,
- size_t a_stride,
- const void* restrict w,
- int8_t* restrict c,
- size_t cm_stride,
- size_t cn_stride,
- const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 1);
- assert(nc != 0);
- assert(kc != 0);
-
- const int8_t* a0 = a;
- int8_t* c0 = c;
-
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- w = (const void*) ((const int32_t*) w + 2);
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) *a0++;
-
- const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
- const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
- w = (const void*) ((const int8_t*) w + 2);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
-
- k -= sizeof(int8_t);
- } while (k != 0);
-
- const int32_t vmultiplier = params->rndnu_scalar.multiplier;
- const int64_t vproduct0x0 = (int64_t) vacc0x0 * (int64_t) vmultiplier;
- const int64_t vproduct0x1 = (int64_t) vacc0x1 * (int64_t) vmultiplier;
-
- const uint32_t shift = params->rndnu_scalar.shift;
- const int64_t rounding = params->rndnu_scalar.rounding;
- int32_t vout0x0 = (int32_t) asr_s64(vproduct0x0 + rounding, shift);
- int32_t vout0x1 = (int32_t) asr_s64(vproduct0x1 + rounding, shift);
-
- const int32_t voutput_min_less_zero_point = params->rndnu_scalar.output_min_less_zero_point;
- vout0x0 = math_max_s32(vout0x0, voutput_min_less_zero_point);
- vout0x1 = math_max_s32(vout0x1, voutput_min_less_zero_point);
-
- const int32_t voutput_max_less_zero_point = params->rndnu_scalar.output_max_less_zero_point;
- vout0x0 = math_min_s32(vout0x0, voutput_max_less_zero_point);
- vout0x1 = math_min_s32(vout0x1, voutput_max_less_zero_point);
-
- const int32_t voutput_zero_point = params->rndnu_scalar.output_zero_point;
- vout0x0 += voutput_zero_point;
- vout0x1 += voutput_zero_point;
-
- if XNN_LIKELY(nc >= 2) {
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
-
- a0 = (const int8_t*) ((uintptr_t) a0 - kc);
-
- c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
-
- nc -= 2;
- } else {
- if (nc & 1) {
- c0[0] = (int8_t) vout0x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qs8-gemm/gen/1x4-minmax-fp32-scalar-magic.c b/src/qs8-gemm/gen/1x4-minmax-fp32-scalar-fmagic.c
similarity index 91%
rename from src/qs8-gemm/gen/1x4-minmax-fp32-scalar-magic.c
rename to src/qs8-gemm/gen/1x4-minmax-fp32-scalar-fmagic.c
index ccfbed6..a74140b 100644
--- a/src/qs8-gemm/gen/1x4-minmax-fp32-scalar-magic.c
+++ b/src/qs8-gemm/gen/1x4-minmax-fp32-scalar-fmagic.c
@@ -15,7 +15,7 @@
#include <xnnpack/gemm.h>
-void xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_magic(
+void xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic(
size_t mr,
size_t nc,
size_t kc,
@@ -65,31 +65,31 @@
float vfpacc0x2 = (float) vacc0x2;
float vfpacc0x3 = (float) vacc0x3;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_fmagic.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc0x2 *= vscale;
vfpacc0x3 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
vfpacc0x3 = math_max_f32(vfpacc0x3, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
vfpacc0x3 = math_min_f32(vfpacc0x3, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
+ const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc0x2 += vmagic_bias;
vfpacc0x3 += vmagic_bias;
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
+ const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
int32_t vout0x2 = (int32_t) fp32_to_bits(vfpacc0x2) - vmagic_bias_less_output_zero_point;
diff --git a/src/qs8-gemm/gen/1x4-minmax-fp32-scalar-lrint.c b/src/qs8-gemm/gen/1x4-minmax-fp32-scalar-lrint.c
deleted file mode 100644
index b72f8c2..0000000
--- a/src/qs8-gemm/gen/1x4-minmax-fp32-scalar-lrint.c
+++ /dev/null
@@ -1,121 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-gemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-#include <math.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint(
- size_t mr,
- size_t nc,
- size_t kc,
- const int8_t* restrict a,
- size_t a_stride,
- const void* restrict w,
- int8_t* restrict c,
- size_t cm_stride,
- size_t cn_stride,
- const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 1);
- assert(nc != 0);
- assert(kc != 0);
-
- const int8_t* a0 = a;
- int8_t* c0 = c;
-
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- int32_t vacc0x2 = ((const int32_t*) w)[2];
- int32_t vacc0x3 = ((const int32_t*) w)[3];
- w = (const void*) ((const int32_t*) w + 4);
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) *a0++;
-
- const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
- const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
- const int32_t vb2 = (int32_t) ((const int8_t*) w)[2];
- const int32_t vb3 = (int32_t) ((const int8_t*) w)[3];
- w = (const void*) ((const int8_t*) w + 4);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
- vacc0x2 += va0 * vb2;
- vacc0x3 += va0 * vb3;
-
- k -= sizeof(int8_t);
- } while (k != 0);
-
- float vfpacc0x0 = (float) vacc0x0;
- float vfpacc0x1 = (float) vacc0x1;
- float vfpacc0x2 = (float) vacc0x2;
- float vfpacc0x3 = (float) vacc0x3;
-
- const float vscale = params->fp32_scalar_lrint.scale;
- vfpacc0x0 *= vscale;
- vfpacc0x1 *= vscale;
- vfpacc0x2 *= vscale;
- vfpacc0x3 *= vscale;
-
- long vrndacc0x0 = lrintf(vfpacc0x0);
- long vrndacc0x1 = lrintf(vfpacc0x1);
- long vrndacc0x2 = lrintf(vfpacc0x2);
- long vrndacc0x3 = lrintf(vfpacc0x3);
-
- const long voutput_min_less_zero_point = params->fp32_scalar_lrint.output_min_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x1;
- vrndacc0x2 = XNN_UNPREDICTABLE(vrndacc0x2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x2;
- vrndacc0x3 = XNN_UNPREDICTABLE(vrndacc0x3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x3;
-
- const long voutput_max_less_zero_point = params->fp32_scalar_lrint.output_max_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x1;
- vrndacc0x2 = XNN_UNPREDICTABLE(vrndacc0x2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x2;
- vrndacc0x3 = XNN_UNPREDICTABLE(vrndacc0x3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x3;
-
- const int32_t voutput_zero_point = params->fp32_scalar_lrint.output_zero_point;
- int32_t vout0x0 = (int32_t) vrndacc0x0 + voutput_zero_point;
- int32_t vout0x1 = (int32_t) vrndacc0x1 + voutput_zero_point;
- int32_t vout0x2 = (int32_t) vrndacc0x2 + voutput_zero_point;
- int32_t vout0x3 = (int32_t) vrndacc0x3 + voutput_zero_point;
-
- if XNN_LIKELY(nc >= 4) {
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
- c0[2] = (int8_t) vout0x2;
- c0[3] = (int8_t) vout0x3;
-
- a0 = (const int8_t*) ((uintptr_t) a0 - kc);
-
- c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
-
- nc -= 4;
- } else {
- if (nc & 2) {
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
- vout0x0 = vout0x2;
- c0 += 2;
- }
- if (nc & 1) {
- c0[0] = (int8_t) vout0x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qs8-gemm/gen/1x4-minmax-fp32-scalar-magic.c b/src/qs8-gemm/gen/1x4-minmax-fp32-scalar-lrintf.c
similarity index 76%
copy from src/qs8-gemm/gen/1x4-minmax-fp32-scalar-magic.c
copy to src/qs8-gemm/gen/1x4-minmax-fp32-scalar-lrintf.c
index ccfbed6..00f9f03 100644
--- a/src/qs8-gemm/gen/1x4-minmax-fp32-scalar-magic.c
+++ b/src/qs8-gemm/gen/1x4-minmax-fp32-scalar-lrintf.c
@@ -8,14 +8,13 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-
-#include <fp16.h>
+#include <math.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
-void xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_magic(
+void xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
@@ -65,35 +64,34 @@
float vfpacc0x2 = (float) vacc0x2;
float vfpacc0x3 = (float) vacc0x3;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_lrintf.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc0x2 *= vscale;
vfpacc0x3 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
vfpacc0x3 = math_max_f32(vfpacc0x3, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
vfpacc0x3 = math_min_f32(vfpacc0x3, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
- vfpacc0x0 += vmagic_bias;
- vfpacc0x1 += vmagic_bias;
- vfpacc0x2 += vmagic_bias;
- vfpacc0x3 += vmagic_bias;
+ const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
+ const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
+ const int32_t vrndacc0x2 = (int32_t) lrintf(vfpacc0x2);
+ const int32_t vrndacc0x3 = (int32_t) lrintf(vfpacc0x3);
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
- int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
- int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
- int32_t vout0x2 = (int32_t) fp32_to_bits(vfpacc0x2) - vmagic_bias_less_output_zero_point;
- int32_t vout0x3 = (int32_t) fp32_to_bits(vfpacc0x3) - vmagic_bias_less_output_zero_point;
+ const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
+ int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
+ int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
+ int32_t vout0x2 = vrndacc0x2 + voutput_zero_point;
+ int32_t vout0x3 = vrndacc0x3 + voutput_zero_point;
if XNN_LIKELY(nc >= 4) {
c0[0] = (int8_t) vout0x0;
diff --git a/src/qs8-gemm/gen/1x4-minmax-rndnu-scalar.c b/src/qs8-gemm/gen/1x4-minmax-rndnu-scalar.c
deleted file mode 100644
index af5a0fe..0000000
--- a/src/qs8-gemm/gen/1x4-minmax-rndnu-scalar.c
+++ /dev/null
@@ -1,117 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-gemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qs8_gemm_minmax_rndnu_ukernel_1x4__scalar(
- size_t mr,
- size_t nc,
- size_t kc,
- const int8_t* restrict a,
- size_t a_stride,
- const void* restrict w,
- int8_t* restrict c,
- size_t cm_stride,
- size_t cn_stride,
- const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 1);
- assert(nc != 0);
- assert(kc != 0);
-
- const int8_t* a0 = a;
- int8_t* c0 = c;
-
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- int32_t vacc0x2 = ((const int32_t*) w)[2];
- int32_t vacc0x3 = ((const int32_t*) w)[3];
- w = (const void*) ((const int32_t*) w + 4);
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) *a0++;
-
- const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
- const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
- const int32_t vb2 = (int32_t) ((const int8_t*) w)[2];
- const int32_t vb3 = (int32_t) ((const int8_t*) w)[3];
- w = (const void*) ((const int8_t*) w + 4);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
- vacc0x2 += va0 * vb2;
- vacc0x3 += va0 * vb3;
-
- k -= sizeof(int8_t);
- } while (k != 0);
-
- const int32_t vmultiplier = params->rndnu_scalar.multiplier;
- const int64_t vproduct0x0 = (int64_t) vacc0x0 * (int64_t) vmultiplier;
- const int64_t vproduct0x1 = (int64_t) vacc0x1 * (int64_t) vmultiplier;
- const int64_t vproduct0x2 = (int64_t) vacc0x2 * (int64_t) vmultiplier;
- const int64_t vproduct0x3 = (int64_t) vacc0x3 * (int64_t) vmultiplier;
-
- const uint32_t shift = params->rndnu_scalar.shift;
- const int64_t rounding = params->rndnu_scalar.rounding;
- int32_t vout0x0 = (int32_t) asr_s64(vproduct0x0 + rounding, shift);
- int32_t vout0x1 = (int32_t) asr_s64(vproduct0x1 + rounding, shift);
- int32_t vout0x2 = (int32_t) asr_s64(vproduct0x2 + rounding, shift);
- int32_t vout0x3 = (int32_t) asr_s64(vproduct0x3 + rounding, shift);
-
- const int32_t voutput_min_less_zero_point = params->rndnu_scalar.output_min_less_zero_point;
- vout0x0 = math_max_s32(vout0x0, voutput_min_less_zero_point);
- vout0x1 = math_max_s32(vout0x1, voutput_min_less_zero_point);
- vout0x2 = math_max_s32(vout0x2, voutput_min_less_zero_point);
- vout0x3 = math_max_s32(vout0x3, voutput_min_less_zero_point);
-
- const int32_t voutput_max_less_zero_point = params->rndnu_scalar.output_max_less_zero_point;
- vout0x0 = math_min_s32(vout0x0, voutput_max_less_zero_point);
- vout0x1 = math_min_s32(vout0x1, voutput_max_less_zero_point);
- vout0x2 = math_min_s32(vout0x2, voutput_max_less_zero_point);
- vout0x3 = math_min_s32(vout0x3, voutput_max_less_zero_point);
-
- const int32_t voutput_zero_point = params->rndnu_scalar.output_zero_point;
- vout0x0 += voutput_zero_point;
- vout0x1 += voutput_zero_point;
- vout0x2 += voutput_zero_point;
- vout0x3 += voutput_zero_point;
-
- if XNN_LIKELY(nc >= 4) {
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
- c0[2] = (int8_t) vout0x2;
- c0[3] = (int8_t) vout0x3;
-
- a0 = (const int8_t*) ((uintptr_t) a0 - kc);
-
- c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
-
- nc -= 4;
- } else {
- if (nc & 2) {
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
- vout0x0 = vout0x2;
- c0 += 2;
- }
- if (nc & 1) {
- c0[0] = (int8_t) vout0x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qs8-gemm/gen/2x2-minmax-fp32-scalar-magic.c b/src/qs8-gemm/gen/2x2-minmax-fp32-scalar-fmagic.c
similarity index 91%
rename from src/qs8-gemm/gen/2x2-minmax-fp32-scalar-magic.c
rename to src/qs8-gemm/gen/2x2-minmax-fp32-scalar-fmagic.c
index 593952f..6ee1bcb 100644
--- a/src/qs8-gemm/gen/2x2-minmax-fp32-scalar-magic.c
+++ b/src/qs8-gemm/gen/2x2-minmax-fp32-scalar-fmagic.c
@@ -15,7 +15,7 @@
#include <xnnpack/gemm.h>
-void xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_magic(
+void xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic(
size_t mr,
size_t nc,
size_t kc,
@@ -70,31 +70,31 @@
float vfpacc1x0 = (float) vacc1x0;
float vfpacc1x1 = (float) vacc1x1;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_fmagic.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc1x0 *= vscale;
vfpacc1x1 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point);
vfpacc1x1 = math_max_f32(vfpacc1x1, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point);
vfpacc1x1 = math_min_f32(vfpacc1x1, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
+ const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc1x0 += vmagic_bias;
vfpacc1x1 += vmagic_bias;
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
+ const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
int32_t vout1x0 = (int32_t) fp32_to_bits(vfpacc1x0) - vmagic_bias_less_output_zero_point;
diff --git a/src/qs8-gemm/gen/2x2-minmax-fp32-scalar-lrint.c b/src/qs8-gemm/gen/2x2-minmax-fp32-scalar-lrint.c
deleted file mode 100644
index 9eb73a9..0000000
--- a/src/qs8-gemm/gen/2x2-minmax-fp32-scalar-lrint.c
+++ /dev/null
@@ -1,123 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-gemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-#include <math.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint(
- size_t mr,
- size_t nc,
- size_t kc,
- const int8_t* restrict a,
- size_t a_stride,
- const void* restrict w,
- int8_t* restrict c,
- size_t cm_stride,
- size_t cn_stride,
- const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 2);
- assert(nc != 0);
- assert(kc != 0);
-
- const int8_t* a0 = a;
- int8_t* c0 = c;
- const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
- int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
- if XNN_UNPREDICTABLE(mr != 2) {
- a1 = a0;
- c1 = c0;
- }
-
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- int32_t vacc1x0 = vacc0x0;
- int32_t vacc1x1 = vacc0x1;
- w = (const void*) ((const int32_t*) w + 2);
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) *a0++;
- const int32_t va1 = (int32_t) *a1++;
-
- const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
- const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
- w = (const void*) ((const int8_t*) w + 2);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
- vacc1x0 += va1 * vb0;
- vacc1x1 += va1 * vb1;
-
- k -= sizeof(int8_t);
- } while (k != 0);
-
- float vfpacc0x0 = (float) vacc0x0;
- float vfpacc0x1 = (float) vacc0x1;
- float vfpacc1x0 = (float) vacc1x0;
- float vfpacc1x1 = (float) vacc1x1;
-
- const float vscale = params->fp32_scalar_lrint.scale;
- vfpacc0x0 *= vscale;
- vfpacc0x1 *= vscale;
- vfpacc1x0 *= vscale;
- vfpacc1x1 *= vscale;
-
- long vrndacc0x0 = lrintf(vfpacc0x0);
- long vrndacc0x1 = lrintf(vfpacc0x1);
- long vrndacc1x0 = lrintf(vfpacc1x0);
- long vrndacc1x1 = lrintf(vfpacc1x1);
-
- const long voutput_min_less_zero_point = params->fp32_scalar_lrint.output_min_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x1;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x1;
-
- const long voutput_max_less_zero_point = params->fp32_scalar_lrint.output_max_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x1;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x1;
-
- const int32_t voutput_zero_point = params->fp32_scalar_lrint.output_zero_point;
- int32_t vout0x0 = (int32_t) vrndacc0x0 + voutput_zero_point;
- int32_t vout0x1 = (int32_t) vrndacc0x1 + voutput_zero_point;
- int32_t vout1x0 = (int32_t) vrndacc1x0 + voutput_zero_point;
- int32_t vout1x1 = (int32_t) vrndacc1x1 + voutput_zero_point;
-
- if XNN_LIKELY(nc >= 2) {
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
- c1[0] = (int8_t) vout1x0;
- c1[1] = (int8_t) vout1x1;
-
- a0 = (const int8_t*) ((uintptr_t) a0 - kc);
- a1 = (const int8_t*) ((uintptr_t) a1 - kc);
-
- c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
- c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
-
- nc -= 2;
- } else {
- if (nc & 1) {
- c0[0] = (int8_t) vout0x0;
- c1[0] = (int8_t) vout1x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qs8-gemm/gen/2x2-minmax-fp32-scalar-magic.c b/src/qs8-gemm/gen/2x2-minmax-fp32-scalar-lrintf.c
similarity index 77%
copy from src/qs8-gemm/gen/2x2-minmax-fp32-scalar-magic.c
copy to src/qs8-gemm/gen/2x2-minmax-fp32-scalar-lrintf.c
index 593952f..6310b8c 100644
--- a/src/qs8-gemm/gen/2x2-minmax-fp32-scalar-magic.c
+++ b/src/qs8-gemm/gen/2x2-minmax-fp32-scalar-lrintf.c
@@ -8,14 +8,13 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-
-#include <fp16.h>
+#include <math.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
-void xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_magic(
+void xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
@@ -70,35 +69,34 @@
float vfpacc1x0 = (float) vacc1x0;
float vfpacc1x1 = (float) vacc1x1;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_lrintf.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc1x0 *= vscale;
vfpacc1x1 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point);
vfpacc1x1 = math_max_f32(vfpacc1x1, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point);
vfpacc1x1 = math_min_f32(vfpacc1x1, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
- vfpacc0x0 += vmagic_bias;
- vfpacc0x1 += vmagic_bias;
- vfpacc1x0 += vmagic_bias;
- vfpacc1x1 += vmagic_bias;
+ const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
+ const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
+ const int32_t vrndacc1x0 = (int32_t) lrintf(vfpacc1x0);
+ const int32_t vrndacc1x1 = (int32_t) lrintf(vfpacc1x1);
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
- int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
- int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
- int32_t vout1x0 = (int32_t) fp32_to_bits(vfpacc1x0) - vmagic_bias_less_output_zero_point;
- int32_t vout1x1 = (int32_t) fp32_to_bits(vfpacc1x1) - vmagic_bias_less_output_zero_point;
+ const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
+ int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
+ int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
+ int32_t vout1x0 = vrndacc1x0 + voutput_zero_point;
+ int32_t vout1x1 = vrndacc1x1 + voutput_zero_point;
if XNN_LIKELY(nc >= 2) {
c0[0] = (int8_t) vout0x0;
diff --git a/src/qs8-gemm/gen/2x2-minmax-rndnu-scalar.c b/src/qs8-gemm/gen/2x2-minmax-rndnu-scalar.c
deleted file mode 100644
index 4fe633f..0000000
--- a/src/qs8-gemm/gen/2x2-minmax-rndnu-scalar.c
+++ /dev/null
@@ -1,119 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-gemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qs8_gemm_minmax_rndnu_ukernel_2x2__scalar(
- size_t mr,
- size_t nc,
- size_t kc,
- const int8_t* restrict a,
- size_t a_stride,
- const void* restrict w,
- int8_t* restrict c,
- size_t cm_stride,
- size_t cn_stride,
- const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 2);
- assert(nc != 0);
- assert(kc != 0);
-
- const int8_t* a0 = a;
- int8_t* c0 = c;
- const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
- int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
- if XNN_UNPREDICTABLE(mr != 2) {
- a1 = a0;
- c1 = c0;
- }
-
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- int32_t vacc1x0 = vacc0x0;
- int32_t vacc1x1 = vacc0x1;
- w = (const void*) ((const int32_t*) w + 2);
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) *a0++;
- const int32_t va1 = (int32_t) *a1++;
-
- const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
- const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
- w = (const void*) ((const int8_t*) w + 2);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
- vacc1x0 += va1 * vb0;
- vacc1x1 += va1 * vb1;
-
- k -= sizeof(int8_t);
- } while (k != 0);
-
- const int32_t vmultiplier = params->rndnu_scalar.multiplier;
- const int64_t vproduct0x0 = (int64_t) vacc0x0 * (int64_t) vmultiplier;
- const int64_t vproduct0x1 = (int64_t) vacc0x1 * (int64_t) vmultiplier;
- const int64_t vproduct1x0 = (int64_t) vacc1x0 * (int64_t) vmultiplier;
- const int64_t vproduct1x1 = (int64_t) vacc1x1 * (int64_t) vmultiplier;
-
- const uint32_t shift = params->rndnu_scalar.shift;
- const int64_t rounding = params->rndnu_scalar.rounding;
- int32_t vout0x0 = (int32_t) asr_s64(vproduct0x0 + rounding, shift);
- int32_t vout0x1 = (int32_t) asr_s64(vproduct0x1 + rounding, shift);
- int32_t vout1x0 = (int32_t) asr_s64(vproduct1x0 + rounding, shift);
- int32_t vout1x1 = (int32_t) asr_s64(vproduct1x1 + rounding, shift);
-
- const int32_t voutput_min_less_zero_point = params->rndnu_scalar.output_min_less_zero_point;
- vout0x0 = math_max_s32(vout0x0, voutput_min_less_zero_point);
- vout0x1 = math_max_s32(vout0x1, voutput_min_less_zero_point);
- vout1x0 = math_max_s32(vout1x0, voutput_min_less_zero_point);
- vout1x1 = math_max_s32(vout1x1, voutput_min_less_zero_point);
-
- const int32_t voutput_max_less_zero_point = params->rndnu_scalar.output_max_less_zero_point;
- vout0x0 = math_min_s32(vout0x0, voutput_max_less_zero_point);
- vout0x1 = math_min_s32(vout0x1, voutput_max_less_zero_point);
- vout1x0 = math_min_s32(vout1x0, voutput_max_less_zero_point);
- vout1x1 = math_min_s32(vout1x1, voutput_max_less_zero_point);
-
- const int32_t voutput_zero_point = params->rndnu_scalar.output_zero_point;
- vout0x0 += voutput_zero_point;
- vout0x1 += voutput_zero_point;
- vout1x0 += voutput_zero_point;
- vout1x1 += voutput_zero_point;
-
- if XNN_LIKELY(nc >= 2) {
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
- c1[0] = (int8_t) vout1x0;
- c1[1] = (int8_t) vout1x1;
-
- a0 = (const int8_t*) ((uintptr_t) a0 - kc);
- a1 = (const int8_t*) ((uintptr_t) a1 - kc);
-
- c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
- c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
-
- nc -= 2;
- } else {
- if (nc & 1) {
- c0[0] = (int8_t) vout0x0;
- c1[0] = (int8_t) vout1x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qs8-gemm/gen/2x4-minmax-fp32-scalar-magic.c b/src/qs8-gemm/gen/2x4-minmax-fp32-scalar-fmagic.c
similarity index 94%
rename from src/qs8-gemm/gen/2x4-minmax-fp32-scalar-magic.c
rename to src/qs8-gemm/gen/2x4-minmax-fp32-scalar-fmagic.c
index b3ea05a..58e569c 100644
--- a/src/qs8-gemm/gen/2x4-minmax-fp32-scalar-magic.c
+++ b/src/qs8-gemm/gen/2x4-minmax-fp32-scalar-fmagic.c
@@ -15,7 +15,7 @@
#include <xnnpack/gemm.h>
-void xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_magic(
+void xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic(
size_t mr,
size_t nc,
size_t kc,
@@ -84,7 +84,7 @@
float vfpacc1x2 = (float) vacc1x2;
float vfpacc1x3 = (float) vacc1x3;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_fmagic.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc0x2 *= vscale;
@@ -94,7 +94,7 @@
vfpacc1x2 *= vscale;
vfpacc1x3 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
@@ -104,7 +104,7 @@
vfpacc1x2 = math_max_f32(vfpacc1x2, voutput_min_less_zero_point);
vfpacc1x3 = math_max_f32(vfpacc1x3, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
@@ -114,7 +114,7 @@
vfpacc1x2 = math_min_f32(vfpacc1x2, voutput_max_less_zero_point);
vfpacc1x3 = math_min_f32(vfpacc1x3, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
+ const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc0x2 += vmagic_bias;
@@ -124,7 +124,7 @@
vfpacc1x2 += vmagic_bias;
vfpacc1x3 += vmagic_bias;
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
+ const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
int32_t vout0x2 = (int32_t) fp32_to_bits(vfpacc0x2) - vmagic_bias_less_output_zero_point;
diff --git a/src/qs8-gemm/gen/2x4-minmax-fp32-scalar-lrint.c b/src/qs8-gemm/gen/2x4-minmax-fp32-scalar-lrint.c
deleted file mode 100644
index b3b1a70..0000000
--- a/src/qs8-gemm/gen/2x4-minmax-fp32-scalar-lrint.c
+++ /dev/null
@@ -1,171 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-gemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-#include <math.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint(
- size_t mr,
- size_t nc,
- size_t kc,
- const int8_t* restrict a,
- size_t a_stride,
- const void* restrict w,
- int8_t* restrict c,
- size_t cm_stride,
- size_t cn_stride,
- const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 2);
- assert(nc != 0);
- assert(kc != 0);
-
- const int8_t* a0 = a;
- int8_t* c0 = c;
- const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
- int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
- if XNN_UNPREDICTABLE(mr != 2) {
- a1 = a0;
- c1 = c0;
- }
-
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- int32_t vacc0x2 = ((const int32_t*) w)[2];
- int32_t vacc0x3 = ((const int32_t*) w)[3];
- int32_t vacc1x0 = vacc0x0;
- int32_t vacc1x1 = vacc0x1;
- int32_t vacc1x2 = vacc0x2;
- int32_t vacc1x3 = vacc0x3;
- w = (const void*) ((const int32_t*) w + 4);
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) *a0++;
- const int32_t va1 = (int32_t) *a1++;
-
- const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
- const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
- const int32_t vb2 = (int32_t) ((const int8_t*) w)[2];
- const int32_t vb3 = (int32_t) ((const int8_t*) w)[3];
- w = (const void*) ((const int8_t*) w + 4);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
- vacc0x2 += va0 * vb2;
- vacc0x3 += va0 * vb3;
- vacc1x0 += va1 * vb0;
- vacc1x1 += va1 * vb1;
- vacc1x2 += va1 * vb2;
- vacc1x3 += va1 * vb3;
-
- k -= sizeof(int8_t);
- } while (k != 0);
-
- float vfpacc0x0 = (float) vacc0x0;
- float vfpacc0x1 = (float) vacc0x1;
- float vfpacc0x2 = (float) vacc0x2;
- float vfpacc0x3 = (float) vacc0x3;
- float vfpacc1x0 = (float) vacc1x0;
- float vfpacc1x1 = (float) vacc1x1;
- float vfpacc1x2 = (float) vacc1x2;
- float vfpacc1x3 = (float) vacc1x3;
-
- const float vscale = params->fp32_scalar_lrint.scale;
- vfpacc0x0 *= vscale;
- vfpacc0x1 *= vscale;
- vfpacc0x2 *= vscale;
- vfpacc0x3 *= vscale;
- vfpacc1x0 *= vscale;
- vfpacc1x1 *= vscale;
- vfpacc1x2 *= vscale;
- vfpacc1x3 *= vscale;
-
- long vrndacc0x0 = lrintf(vfpacc0x0);
- long vrndacc0x1 = lrintf(vfpacc0x1);
- long vrndacc0x2 = lrintf(vfpacc0x2);
- long vrndacc0x3 = lrintf(vfpacc0x3);
- long vrndacc1x0 = lrintf(vfpacc1x0);
- long vrndacc1x1 = lrintf(vfpacc1x1);
- long vrndacc1x2 = lrintf(vfpacc1x2);
- long vrndacc1x3 = lrintf(vfpacc1x3);
-
- const long voutput_min_less_zero_point = params->fp32_scalar_lrint.output_min_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x1;
- vrndacc0x2 = XNN_UNPREDICTABLE(vrndacc0x2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x2;
- vrndacc0x3 = XNN_UNPREDICTABLE(vrndacc0x3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x3;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x1;
- vrndacc1x2 = XNN_UNPREDICTABLE(vrndacc1x2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x2;
- vrndacc1x3 = XNN_UNPREDICTABLE(vrndacc1x3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x3;
-
- const long voutput_max_less_zero_point = params->fp32_scalar_lrint.output_max_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x1;
- vrndacc0x2 = XNN_UNPREDICTABLE(vrndacc0x2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x2;
- vrndacc0x3 = XNN_UNPREDICTABLE(vrndacc0x3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x3;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x1;
- vrndacc1x2 = XNN_UNPREDICTABLE(vrndacc1x2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x2;
- vrndacc1x3 = XNN_UNPREDICTABLE(vrndacc1x3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x3;
-
- const int32_t voutput_zero_point = params->fp32_scalar_lrint.output_zero_point;
- int32_t vout0x0 = (int32_t) vrndacc0x0 + voutput_zero_point;
- int32_t vout0x1 = (int32_t) vrndacc0x1 + voutput_zero_point;
- int32_t vout0x2 = (int32_t) vrndacc0x2 + voutput_zero_point;
- int32_t vout0x3 = (int32_t) vrndacc0x3 + voutput_zero_point;
- int32_t vout1x0 = (int32_t) vrndacc1x0 + voutput_zero_point;
- int32_t vout1x1 = (int32_t) vrndacc1x1 + voutput_zero_point;
- int32_t vout1x2 = (int32_t) vrndacc1x2 + voutput_zero_point;
- int32_t vout1x3 = (int32_t) vrndacc1x3 + voutput_zero_point;
-
- if XNN_LIKELY(nc >= 4) {
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
- c0[2] = (int8_t) vout0x2;
- c0[3] = (int8_t) vout0x3;
- c1[0] = (int8_t) vout1x0;
- c1[1] = (int8_t) vout1x1;
- c1[2] = (int8_t) vout1x2;
- c1[3] = (int8_t) vout1x3;
-
- a0 = (const int8_t*) ((uintptr_t) a0 - kc);
- a1 = (const int8_t*) ((uintptr_t) a1 - kc);
-
- c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
- c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
-
- nc -= 4;
- } else {
- if (nc & 2) {
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
- vout0x0 = vout0x2;
- c0 += 2;
- c1[0] = (int8_t) vout1x0;
- c1[1] = (int8_t) vout1x1;
- vout1x0 = vout1x2;
- c1 += 2;
- }
- if (nc & 1) {
- c0[0] = (int8_t) vout0x0;
- c1[0] = (int8_t) vout1x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qs8-gemm/gen/2x4-minmax-fp32-scalar-magic.c b/src/qs8-gemm/gen/2x4-minmax-fp32-scalar-lrintf.c
similarity index 76%
copy from src/qs8-gemm/gen/2x4-minmax-fp32-scalar-magic.c
copy to src/qs8-gemm/gen/2x4-minmax-fp32-scalar-lrintf.c
index b3ea05a..604c8c4 100644
--- a/src/qs8-gemm/gen/2x4-minmax-fp32-scalar-magic.c
+++ b/src/qs8-gemm/gen/2x4-minmax-fp32-scalar-lrintf.c
@@ -8,14 +8,13 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-
-#include <fp16.h>
+#include <math.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
-void xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_magic(
+void xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
@@ -84,7 +83,7 @@
float vfpacc1x2 = (float) vacc1x2;
float vfpacc1x3 = (float) vacc1x3;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_lrintf.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc0x2 *= vscale;
@@ -94,7 +93,7 @@
vfpacc1x2 *= vscale;
vfpacc1x3 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
@@ -104,7 +103,7 @@
vfpacc1x2 = math_max_f32(vfpacc1x2, voutput_min_less_zero_point);
vfpacc1x3 = math_max_f32(vfpacc1x3, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
@@ -114,25 +113,24 @@
vfpacc1x2 = math_min_f32(vfpacc1x2, voutput_max_less_zero_point);
vfpacc1x3 = math_min_f32(vfpacc1x3, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
- vfpacc0x0 += vmagic_bias;
- vfpacc0x1 += vmagic_bias;
- vfpacc0x2 += vmagic_bias;
- vfpacc0x3 += vmagic_bias;
- vfpacc1x0 += vmagic_bias;
- vfpacc1x1 += vmagic_bias;
- vfpacc1x2 += vmagic_bias;
- vfpacc1x3 += vmagic_bias;
+ const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
+ const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
+ const int32_t vrndacc0x2 = (int32_t) lrintf(vfpacc0x2);
+ const int32_t vrndacc0x3 = (int32_t) lrintf(vfpacc0x3);
+ const int32_t vrndacc1x0 = (int32_t) lrintf(vfpacc1x0);
+ const int32_t vrndacc1x1 = (int32_t) lrintf(vfpacc1x1);
+ const int32_t vrndacc1x2 = (int32_t) lrintf(vfpacc1x2);
+ const int32_t vrndacc1x3 = (int32_t) lrintf(vfpacc1x3);
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
- int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
- int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
- int32_t vout0x2 = (int32_t) fp32_to_bits(vfpacc0x2) - vmagic_bias_less_output_zero_point;
- int32_t vout0x3 = (int32_t) fp32_to_bits(vfpacc0x3) - vmagic_bias_less_output_zero_point;
- int32_t vout1x0 = (int32_t) fp32_to_bits(vfpacc1x0) - vmagic_bias_less_output_zero_point;
- int32_t vout1x1 = (int32_t) fp32_to_bits(vfpacc1x1) - vmagic_bias_less_output_zero_point;
- int32_t vout1x2 = (int32_t) fp32_to_bits(vfpacc1x2) - vmagic_bias_less_output_zero_point;
- int32_t vout1x3 = (int32_t) fp32_to_bits(vfpacc1x3) - vmagic_bias_less_output_zero_point;
+ const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
+ int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
+ int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
+ int32_t vout0x2 = vrndacc0x2 + voutput_zero_point;
+ int32_t vout0x3 = vrndacc0x3 + voutput_zero_point;
+ int32_t vout1x0 = vrndacc1x0 + voutput_zero_point;
+ int32_t vout1x1 = vrndacc1x1 + voutput_zero_point;
+ int32_t vout1x2 = vrndacc1x2 + voutput_zero_point;
+ int32_t vout1x3 = vrndacc1x3 + voutput_zero_point;
if XNN_LIKELY(nc >= 4) {
c0[0] = (int8_t) vout0x0;
diff --git a/src/qs8-gemm/gen/2x4-minmax-rndnu-scalar.c b/src/qs8-gemm/gen/2x4-minmax-rndnu-scalar.c
deleted file mode 100644
index 02c2200..0000000
--- a/src/qs8-gemm/gen/2x4-minmax-rndnu-scalar.c
+++ /dev/null
@@ -1,163 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-gemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qs8_gemm_minmax_rndnu_ukernel_2x4__scalar(
- size_t mr,
- size_t nc,
- size_t kc,
- const int8_t* restrict a,
- size_t a_stride,
- const void* restrict w,
- int8_t* restrict c,
- size_t cm_stride,
- size_t cn_stride,
- const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 2);
- assert(nc != 0);
- assert(kc != 0);
-
- const int8_t* a0 = a;
- int8_t* c0 = c;
- const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
- int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
- if XNN_UNPREDICTABLE(mr != 2) {
- a1 = a0;
- c1 = c0;
- }
-
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- int32_t vacc0x2 = ((const int32_t*) w)[2];
- int32_t vacc0x3 = ((const int32_t*) w)[3];
- int32_t vacc1x0 = vacc0x0;
- int32_t vacc1x1 = vacc0x1;
- int32_t vacc1x2 = vacc0x2;
- int32_t vacc1x3 = vacc0x3;
- w = (const void*) ((const int32_t*) w + 4);
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) *a0++;
- const int32_t va1 = (int32_t) *a1++;
-
- const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
- const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
- const int32_t vb2 = (int32_t) ((const int8_t*) w)[2];
- const int32_t vb3 = (int32_t) ((const int8_t*) w)[3];
- w = (const void*) ((const int8_t*) w + 4);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
- vacc0x2 += va0 * vb2;
- vacc0x3 += va0 * vb3;
- vacc1x0 += va1 * vb0;
- vacc1x1 += va1 * vb1;
- vacc1x2 += va1 * vb2;
- vacc1x3 += va1 * vb3;
-
- k -= sizeof(int8_t);
- } while (k != 0);
-
- const int32_t vmultiplier = params->rndnu_scalar.multiplier;
- const int64_t vproduct0x0 = (int64_t) vacc0x0 * (int64_t) vmultiplier;
- const int64_t vproduct0x1 = (int64_t) vacc0x1 * (int64_t) vmultiplier;
- const int64_t vproduct0x2 = (int64_t) vacc0x2 * (int64_t) vmultiplier;
- const int64_t vproduct0x3 = (int64_t) vacc0x3 * (int64_t) vmultiplier;
- const int64_t vproduct1x0 = (int64_t) vacc1x0 * (int64_t) vmultiplier;
- const int64_t vproduct1x1 = (int64_t) vacc1x1 * (int64_t) vmultiplier;
- const int64_t vproduct1x2 = (int64_t) vacc1x2 * (int64_t) vmultiplier;
- const int64_t vproduct1x3 = (int64_t) vacc1x3 * (int64_t) vmultiplier;
-
- const uint32_t shift = params->rndnu_scalar.shift;
- const int64_t rounding = params->rndnu_scalar.rounding;
- int32_t vout0x0 = (int32_t) asr_s64(vproduct0x0 + rounding, shift);
- int32_t vout0x1 = (int32_t) asr_s64(vproduct0x1 + rounding, shift);
- int32_t vout0x2 = (int32_t) asr_s64(vproduct0x2 + rounding, shift);
- int32_t vout0x3 = (int32_t) asr_s64(vproduct0x3 + rounding, shift);
- int32_t vout1x0 = (int32_t) asr_s64(vproduct1x0 + rounding, shift);
- int32_t vout1x1 = (int32_t) asr_s64(vproduct1x1 + rounding, shift);
- int32_t vout1x2 = (int32_t) asr_s64(vproduct1x2 + rounding, shift);
- int32_t vout1x3 = (int32_t) asr_s64(vproduct1x3 + rounding, shift);
-
- const int32_t voutput_min_less_zero_point = params->rndnu_scalar.output_min_less_zero_point;
- vout0x0 = math_max_s32(vout0x0, voutput_min_less_zero_point);
- vout0x1 = math_max_s32(vout0x1, voutput_min_less_zero_point);
- vout0x2 = math_max_s32(vout0x2, voutput_min_less_zero_point);
- vout0x3 = math_max_s32(vout0x3, voutput_min_less_zero_point);
- vout1x0 = math_max_s32(vout1x0, voutput_min_less_zero_point);
- vout1x1 = math_max_s32(vout1x1, voutput_min_less_zero_point);
- vout1x2 = math_max_s32(vout1x2, voutput_min_less_zero_point);
- vout1x3 = math_max_s32(vout1x3, voutput_min_less_zero_point);
-
- const int32_t voutput_max_less_zero_point = params->rndnu_scalar.output_max_less_zero_point;
- vout0x0 = math_min_s32(vout0x0, voutput_max_less_zero_point);
- vout0x1 = math_min_s32(vout0x1, voutput_max_less_zero_point);
- vout0x2 = math_min_s32(vout0x2, voutput_max_less_zero_point);
- vout0x3 = math_min_s32(vout0x3, voutput_max_less_zero_point);
- vout1x0 = math_min_s32(vout1x0, voutput_max_less_zero_point);
- vout1x1 = math_min_s32(vout1x1, voutput_max_less_zero_point);
- vout1x2 = math_min_s32(vout1x2, voutput_max_less_zero_point);
- vout1x3 = math_min_s32(vout1x3, voutput_max_less_zero_point);
-
- const int32_t voutput_zero_point = params->rndnu_scalar.output_zero_point;
- vout0x0 += voutput_zero_point;
- vout0x1 += voutput_zero_point;
- vout0x2 += voutput_zero_point;
- vout0x3 += voutput_zero_point;
- vout1x0 += voutput_zero_point;
- vout1x1 += voutput_zero_point;
- vout1x2 += voutput_zero_point;
- vout1x3 += voutput_zero_point;
-
- if XNN_LIKELY(nc >= 4) {
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
- c0[2] = (int8_t) vout0x2;
- c0[3] = (int8_t) vout0x3;
- c1[0] = (int8_t) vout1x0;
- c1[1] = (int8_t) vout1x1;
- c1[2] = (int8_t) vout1x2;
- c1[3] = (int8_t) vout1x3;
-
- a0 = (const int8_t*) ((uintptr_t) a0 - kc);
- a1 = (const int8_t*) ((uintptr_t) a1 - kc);
-
- c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
- c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
-
- nc -= 4;
- } else {
- if (nc & 2) {
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
- vout0x0 = vout0x2;
- c0 += 2;
- c1[0] = (int8_t) vout1x0;
- c1[1] = (int8_t) vout1x1;
- vout1x0 = vout1x2;
- c1 += 2;
- }
- if (nc & 1) {
- c0[0] = (int8_t) vout0x0;
- c1[0] = (int8_t) vout1x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qs8-gemm/gen/3x2-minmax-fp32-scalar-magic.c b/src/qs8-gemm/gen/3x2-minmax-fp32-scalar-fmagic.c
similarity index 93%
rename from src/qs8-gemm/gen/3x2-minmax-fp32-scalar-magic.c
rename to src/qs8-gemm/gen/3x2-minmax-fp32-scalar-fmagic.c
index dea54e1..2683c09 100644
--- a/src/qs8-gemm/gen/3x2-minmax-fp32-scalar-magic.c
+++ b/src/qs8-gemm/gen/3x2-minmax-fp32-scalar-fmagic.c
@@ -15,7 +15,7 @@
#include <xnnpack/gemm.h>
-void xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_magic(
+void xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic(
size_t mr,
size_t nc,
size_t kc,
@@ -83,7 +83,7 @@
float vfpacc2x0 = (float) vacc2x0;
float vfpacc2x1 = (float) vacc2x1;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_fmagic.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc1x0 *= vscale;
@@ -91,7 +91,7 @@
vfpacc2x0 *= vscale;
vfpacc2x1 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point);
@@ -99,7 +99,7 @@
vfpacc2x0 = math_max_f32(vfpacc2x0, voutput_min_less_zero_point);
vfpacc2x1 = math_max_f32(vfpacc2x1, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point);
@@ -107,7 +107,7 @@
vfpacc2x0 = math_min_f32(vfpacc2x0, voutput_max_less_zero_point);
vfpacc2x1 = math_min_f32(vfpacc2x1, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
+ const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc1x0 += vmagic_bias;
@@ -115,7 +115,7 @@
vfpacc2x0 += vmagic_bias;
vfpacc2x1 += vmagic_bias;
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
+ const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
int32_t vout1x0 = (int32_t) fp32_to_bits(vfpacc1x0) - vmagic_bias_less_output_zero_point;
diff --git a/src/qs8-gemm/gen/3x2-minmax-fp32-scalar-lrint.c b/src/qs8-gemm/gen/3x2-minmax-fp32-scalar-lrint.c
deleted file mode 100644
index 2077c4e..0000000
--- a/src/qs8-gemm/gen/3x2-minmax-fp32-scalar-lrint.c
+++ /dev/null
@@ -1,151 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-gemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-#include <math.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint(
- size_t mr,
- size_t nc,
- size_t kc,
- const int8_t* restrict a,
- size_t a_stride,
- const void* restrict w,
- int8_t* restrict c,
- size_t cm_stride,
- size_t cn_stride,
- const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 3);
- assert(nc != 0);
- assert(kc != 0);
-
- const int8_t* a0 = a;
- int8_t* c0 = c;
- const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
- int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
- if XNN_UNPREDICTABLE(mr < 2) {
- a1 = a0;
- c1 = c0;
- }
- const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
- int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
- if XNN_UNPREDICTABLE(mr <= 2) {
- a2 = a1;
- c2 = c1;
- }
-
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- int32_t vacc1x0 = vacc0x0;
- int32_t vacc1x1 = vacc0x1;
- int32_t vacc2x0 = vacc0x0;
- int32_t vacc2x1 = vacc0x1;
- w = (const void*) ((const int32_t*) w + 2);
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) *a0++;
- const int32_t va1 = (int32_t) *a1++;
- const int32_t va2 = (int32_t) *a2++;
-
- const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
- const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
- w = (const void*) ((const int8_t*) w + 2);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
- vacc1x0 += va1 * vb0;
- vacc1x1 += va1 * vb1;
- vacc2x0 += va2 * vb0;
- vacc2x1 += va2 * vb1;
-
- k -= sizeof(int8_t);
- } while (k != 0);
-
- float vfpacc0x0 = (float) vacc0x0;
- float vfpacc0x1 = (float) vacc0x1;
- float vfpacc1x0 = (float) vacc1x0;
- float vfpacc1x1 = (float) vacc1x1;
- float vfpacc2x0 = (float) vacc2x0;
- float vfpacc2x1 = (float) vacc2x1;
-
- const float vscale = params->fp32_scalar_lrint.scale;
- vfpacc0x0 *= vscale;
- vfpacc0x1 *= vscale;
- vfpacc1x0 *= vscale;
- vfpacc1x1 *= vscale;
- vfpacc2x0 *= vscale;
- vfpacc2x1 *= vscale;
-
- long vrndacc0x0 = lrintf(vfpacc0x0);
- long vrndacc0x1 = lrintf(vfpacc0x1);
- long vrndacc1x0 = lrintf(vfpacc1x0);
- long vrndacc1x1 = lrintf(vfpacc1x1);
- long vrndacc2x0 = lrintf(vfpacc2x0);
- long vrndacc2x1 = lrintf(vfpacc2x1);
-
- const long voutput_min_less_zero_point = params->fp32_scalar_lrint.output_min_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x1;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x1;
- vrndacc2x0 = XNN_UNPREDICTABLE(vrndacc2x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x0;
- vrndacc2x1 = XNN_UNPREDICTABLE(vrndacc2x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x1;
-
- const long voutput_max_less_zero_point = params->fp32_scalar_lrint.output_max_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x1;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x1;
- vrndacc2x0 = XNN_UNPREDICTABLE(vrndacc2x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x0;
- vrndacc2x1 = XNN_UNPREDICTABLE(vrndacc2x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x1;
-
- const int32_t voutput_zero_point = params->fp32_scalar_lrint.output_zero_point;
- int32_t vout0x0 = (int32_t) vrndacc0x0 + voutput_zero_point;
- int32_t vout0x1 = (int32_t) vrndacc0x1 + voutput_zero_point;
- int32_t vout1x0 = (int32_t) vrndacc1x0 + voutput_zero_point;
- int32_t vout1x1 = (int32_t) vrndacc1x1 + voutput_zero_point;
- int32_t vout2x0 = (int32_t) vrndacc2x0 + voutput_zero_point;
- int32_t vout2x1 = (int32_t) vrndacc2x1 + voutput_zero_point;
-
- if XNN_LIKELY(nc >= 2) {
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
- c1[0] = (int8_t) vout1x0;
- c1[1] = (int8_t) vout1x1;
- c2[0] = (int8_t) vout2x0;
- c2[1] = (int8_t) vout2x1;
-
- a0 = (const int8_t*) ((uintptr_t) a0 - kc);
- a1 = (const int8_t*) ((uintptr_t) a1 - kc);
- a2 = (const int8_t*) ((uintptr_t) a2 - kc);
-
- c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
- c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
- c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
-
- nc -= 2;
- } else {
- if (nc & 1) {
- c0[0] = (int8_t) vout0x0;
- c1[0] = (int8_t) vout1x0;
- c2[0] = (int8_t) vout2x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qs8-gemm/gen/3x2-minmax-fp32-scalar-magic.c b/src/qs8-gemm/gen/3x2-minmax-fp32-scalar-lrintf.c
similarity index 77%
copy from src/qs8-gemm/gen/3x2-minmax-fp32-scalar-magic.c
copy to src/qs8-gemm/gen/3x2-minmax-fp32-scalar-lrintf.c
index dea54e1..0283c99 100644
--- a/src/qs8-gemm/gen/3x2-minmax-fp32-scalar-magic.c
+++ b/src/qs8-gemm/gen/3x2-minmax-fp32-scalar-lrintf.c
@@ -8,14 +8,13 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-
-#include <fp16.h>
+#include <math.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
-void xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_magic(
+void xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
@@ -83,7 +82,7 @@
float vfpacc2x0 = (float) vacc2x0;
float vfpacc2x1 = (float) vacc2x1;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_lrintf.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc1x0 *= vscale;
@@ -91,7 +90,7 @@
vfpacc2x0 *= vscale;
vfpacc2x1 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point);
@@ -99,7 +98,7 @@
vfpacc2x0 = math_max_f32(vfpacc2x0, voutput_min_less_zero_point);
vfpacc2x1 = math_max_f32(vfpacc2x1, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point);
@@ -107,21 +106,20 @@
vfpacc2x0 = math_min_f32(vfpacc2x0, voutput_max_less_zero_point);
vfpacc2x1 = math_min_f32(vfpacc2x1, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
- vfpacc0x0 += vmagic_bias;
- vfpacc0x1 += vmagic_bias;
- vfpacc1x0 += vmagic_bias;
- vfpacc1x1 += vmagic_bias;
- vfpacc2x0 += vmagic_bias;
- vfpacc2x1 += vmagic_bias;
+ const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
+ const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
+ const int32_t vrndacc1x0 = (int32_t) lrintf(vfpacc1x0);
+ const int32_t vrndacc1x1 = (int32_t) lrintf(vfpacc1x1);
+ const int32_t vrndacc2x0 = (int32_t) lrintf(vfpacc2x0);
+ const int32_t vrndacc2x1 = (int32_t) lrintf(vfpacc2x1);
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
- int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
- int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
- int32_t vout1x0 = (int32_t) fp32_to_bits(vfpacc1x0) - vmagic_bias_less_output_zero_point;
- int32_t vout1x1 = (int32_t) fp32_to_bits(vfpacc1x1) - vmagic_bias_less_output_zero_point;
- int32_t vout2x0 = (int32_t) fp32_to_bits(vfpacc2x0) - vmagic_bias_less_output_zero_point;
- int32_t vout2x1 = (int32_t) fp32_to_bits(vfpacc2x1) - vmagic_bias_less_output_zero_point;
+ const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
+ int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
+ int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
+ int32_t vout1x0 = vrndacc1x0 + voutput_zero_point;
+ int32_t vout1x1 = vrndacc1x1 + voutput_zero_point;
+ int32_t vout2x0 = vrndacc2x0 + voutput_zero_point;
+ int32_t vout2x1 = vrndacc2x1 + voutput_zero_point;
if XNN_LIKELY(nc >= 2) {
c0[0] = (int8_t) vout0x0;
diff --git a/src/qs8-gemm/gen/3x2-minmax-rndnu-scalar.c b/src/qs8-gemm/gen/3x2-minmax-rndnu-scalar.c
deleted file mode 100644
index f3270b2..0000000
--- a/src/qs8-gemm/gen/3x2-minmax-rndnu-scalar.c
+++ /dev/null
@@ -1,145 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-gemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qs8_gemm_minmax_rndnu_ukernel_3x2__scalar(
- size_t mr,
- size_t nc,
- size_t kc,
- const int8_t* restrict a,
- size_t a_stride,
- const void* restrict w,
- int8_t* restrict c,
- size_t cm_stride,
- size_t cn_stride,
- const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 3);
- assert(nc != 0);
- assert(kc != 0);
-
- const int8_t* a0 = a;
- int8_t* c0 = c;
- const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
- int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
- if XNN_UNPREDICTABLE(mr < 2) {
- a1 = a0;
- c1 = c0;
- }
- const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
- int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
- if XNN_UNPREDICTABLE(mr <= 2) {
- a2 = a1;
- c2 = c1;
- }
-
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- int32_t vacc1x0 = vacc0x0;
- int32_t vacc1x1 = vacc0x1;
- int32_t vacc2x0 = vacc0x0;
- int32_t vacc2x1 = vacc0x1;
- w = (const void*) ((const int32_t*) w + 2);
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) *a0++;
- const int32_t va1 = (int32_t) *a1++;
- const int32_t va2 = (int32_t) *a2++;
-
- const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
- const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
- w = (const void*) ((const int8_t*) w + 2);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
- vacc1x0 += va1 * vb0;
- vacc1x1 += va1 * vb1;
- vacc2x0 += va2 * vb0;
- vacc2x1 += va2 * vb1;
-
- k -= sizeof(int8_t);
- } while (k != 0);
-
- const int32_t vmultiplier = params->rndnu_scalar.multiplier;
- const int64_t vproduct0x0 = (int64_t) vacc0x0 * (int64_t) vmultiplier;
- const int64_t vproduct0x1 = (int64_t) vacc0x1 * (int64_t) vmultiplier;
- const int64_t vproduct1x0 = (int64_t) vacc1x0 * (int64_t) vmultiplier;
- const int64_t vproduct1x1 = (int64_t) vacc1x1 * (int64_t) vmultiplier;
- const int64_t vproduct2x0 = (int64_t) vacc2x0 * (int64_t) vmultiplier;
- const int64_t vproduct2x1 = (int64_t) vacc2x1 * (int64_t) vmultiplier;
-
- const uint32_t shift = params->rndnu_scalar.shift;
- const int64_t rounding = params->rndnu_scalar.rounding;
- int32_t vout0x0 = (int32_t) asr_s64(vproduct0x0 + rounding, shift);
- int32_t vout0x1 = (int32_t) asr_s64(vproduct0x1 + rounding, shift);
- int32_t vout1x0 = (int32_t) asr_s64(vproduct1x0 + rounding, shift);
- int32_t vout1x1 = (int32_t) asr_s64(vproduct1x1 + rounding, shift);
- int32_t vout2x0 = (int32_t) asr_s64(vproduct2x0 + rounding, shift);
- int32_t vout2x1 = (int32_t) asr_s64(vproduct2x1 + rounding, shift);
-
- const int32_t voutput_min_less_zero_point = params->rndnu_scalar.output_min_less_zero_point;
- vout0x0 = math_max_s32(vout0x0, voutput_min_less_zero_point);
- vout0x1 = math_max_s32(vout0x1, voutput_min_less_zero_point);
- vout1x0 = math_max_s32(vout1x0, voutput_min_less_zero_point);
- vout1x1 = math_max_s32(vout1x1, voutput_min_less_zero_point);
- vout2x0 = math_max_s32(vout2x0, voutput_min_less_zero_point);
- vout2x1 = math_max_s32(vout2x1, voutput_min_less_zero_point);
-
- const int32_t voutput_max_less_zero_point = params->rndnu_scalar.output_max_less_zero_point;
- vout0x0 = math_min_s32(vout0x0, voutput_max_less_zero_point);
- vout0x1 = math_min_s32(vout0x1, voutput_max_less_zero_point);
- vout1x0 = math_min_s32(vout1x0, voutput_max_less_zero_point);
- vout1x1 = math_min_s32(vout1x1, voutput_max_less_zero_point);
- vout2x0 = math_min_s32(vout2x0, voutput_max_less_zero_point);
- vout2x1 = math_min_s32(vout2x1, voutput_max_less_zero_point);
-
- const int32_t voutput_zero_point = params->rndnu_scalar.output_zero_point;
- vout0x0 += voutput_zero_point;
- vout0x1 += voutput_zero_point;
- vout1x0 += voutput_zero_point;
- vout1x1 += voutput_zero_point;
- vout2x0 += voutput_zero_point;
- vout2x1 += voutput_zero_point;
-
- if XNN_LIKELY(nc >= 2) {
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
- c1[0] = (int8_t) vout1x0;
- c1[1] = (int8_t) vout1x1;
- c2[0] = (int8_t) vout2x0;
- c2[1] = (int8_t) vout2x1;
-
- a0 = (const int8_t*) ((uintptr_t) a0 - kc);
- a1 = (const int8_t*) ((uintptr_t) a1 - kc);
- a2 = (const int8_t*) ((uintptr_t) a2 - kc);
-
- c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
- c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
- c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
-
- nc -= 2;
- } else {
- if (nc & 1) {
- c0[0] = (int8_t) vout0x0;
- c1[0] = (int8_t) vout1x0;
- c2[0] = (int8_t) vout2x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qs8-gemm/gen/3x4-minmax-fp32-scalar-magic.c b/src/qs8-gemm/gen/3x4-minmax-fp32-scalar-fmagic.c
similarity index 95%
rename from src/qs8-gemm/gen/3x4-minmax-fp32-scalar-magic.c
rename to src/qs8-gemm/gen/3x4-minmax-fp32-scalar-fmagic.c
index 8b966d9..86bffc9 100644
--- a/src/qs8-gemm/gen/3x4-minmax-fp32-scalar-magic.c
+++ b/src/qs8-gemm/gen/3x4-minmax-fp32-scalar-fmagic.c
@@ -15,7 +15,7 @@
#include <xnnpack/gemm.h>
-void xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_magic(
+void xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic(
size_t mr,
size_t nc,
size_t kc,
@@ -103,7 +103,7 @@
float vfpacc2x2 = (float) vacc2x2;
float vfpacc2x3 = (float) vacc2x3;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_fmagic.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc0x2 *= vscale;
@@ -117,7 +117,7 @@
vfpacc2x2 *= vscale;
vfpacc2x3 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
@@ -131,7 +131,7 @@
vfpacc2x2 = math_max_f32(vfpacc2x2, voutput_min_less_zero_point);
vfpacc2x3 = math_max_f32(vfpacc2x3, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
@@ -145,7 +145,7 @@
vfpacc2x2 = math_min_f32(vfpacc2x2, voutput_max_less_zero_point);
vfpacc2x3 = math_min_f32(vfpacc2x3, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
+ const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc0x2 += vmagic_bias;
@@ -159,7 +159,7 @@
vfpacc2x2 += vmagic_bias;
vfpacc2x3 += vmagic_bias;
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
+ const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
int32_t vout0x2 = (int32_t) fp32_to_bits(vfpacc0x2) - vmagic_bias_less_output_zero_point;
diff --git a/src/qs8-gemm/gen/3x4-minmax-fp32-scalar-lrint.c b/src/qs8-gemm/gen/3x4-minmax-fp32-scalar-lrint.c
deleted file mode 100644
index b912ebb..0000000
--- a/src/qs8-gemm/gen/3x4-minmax-fp32-scalar-lrint.c
+++ /dev/null
@@ -1,221 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-gemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-#include <math.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint(
- size_t mr,
- size_t nc,
- size_t kc,
- const int8_t* restrict a,
- size_t a_stride,
- const void* restrict w,
- int8_t* restrict c,
- size_t cm_stride,
- size_t cn_stride,
- const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 3);
- assert(nc != 0);
- assert(kc != 0);
-
- const int8_t* a0 = a;
- int8_t* c0 = c;
- const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
- int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
- if XNN_UNPREDICTABLE(mr < 2) {
- a1 = a0;
- c1 = c0;
- }
- const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
- int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
- if XNN_UNPREDICTABLE(mr <= 2) {
- a2 = a1;
- c2 = c1;
- }
-
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- int32_t vacc0x2 = ((const int32_t*) w)[2];
- int32_t vacc0x3 = ((const int32_t*) w)[3];
- int32_t vacc1x0 = vacc0x0;
- int32_t vacc1x1 = vacc0x1;
- int32_t vacc1x2 = vacc0x2;
- int32_t vacc1x3 = vacc0x3;
- int32_t vacc2x0 = vacc0x0;
- int32_t vacc2x1 = vacc0x1;
- int32_t vacc2x2 = vacc0x2;
- int32_t vacc2x3 = vacc0x3;
- w = (const void*) ((const int32_t*) w + 4);
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) *a0++;
- const int32_t va1 = (int32_t) *a1++;
- const int32_t va2 = (int32_t) *a2++;
-
- const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
- const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
- const int32_t vb2 = (int32_t) ((const int8_t*) w)[2];
- const int32_t vb3 = (int32_t) ((const int8_t*) w)[3];
- w = (const void*) ((const int8_t*) w + 4);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
- vacc0x2 += va0 * vb2;
- vacc0x3 += va0 * vb3;
- vacc1x0 += va1 * vb0;
- vacc1x1 += va1 * vb1;
- vacc1x2 += va1 * vb2;
- vacc1x3 += va1 * vb3;
- vacc2x0 += va2 * vb0;
- vacc2x1 += va2 * vb1;
- vacc2x2 += va2 * vb2;
- vacc2x3 += va2 * vb3;
-
- k -= sizeof(int8_t);
- } while (k != 0);
-
- float vfpacc0x0 = (float) vacc0x0;
- float vfpacc0x1 = (float) vacc0x1;
- float vfpacc0x2 = (float) vacc0x2;
- float vfpacc0x3 = (float) vacc0x3;
- float vfpacc1x0 = (float) vacc1x0;
- float vfpacc1x1 = (float) vacc1x1;
- float vfpacc1x2 = (float) vacc1x2;
- float vfpacc1x3 = (float) vacc1x3;
- float vfpacc2x0 = (float) vacc2x0;
- float vfpacc2x1 = (float) vacc2x1;
- float vfpacc2x2 = (float) vacc2x2;
- float vfpacc2x3 = (float) vacc2x3;
-
- const float vscale = params->fp32_scalar_lrint.scale;
- vfpacc0x0 *= vscale;
- vfpacc0x1 *= vscale;
- vfpacc0x2 *= vscale;
- vfpacc0x3 *= vscale;
- vfpacc1x0 *= vscale;
- vfpacc1x1 *= vscale;
- vfpacc1x2 *= vscale;
- vfpacc1x3 *= vscale;
- vfpacc2x0 *= vscale;
- vfpacc2x1 *= vscale;
- vfpacc2x2 *= vscale;
- vfpacc2x3 *= vscale;
-
- long vrndacc0x0 = lrintf(vfpacc0x0);
- long vrndacc0x1 = lrintf(vfpacc0x1);
- long vrndacc0x2 = lrintf(vfpacc0x2);
- long vrndacc0x3 = lrintf(vfpacc0x3);
- long vrndacc1x0 = lrintf(vfpacc1x0);
- long vrndacc1x1 = lrintf(vfpacc1x1);
- long vrndacc1x2 = lrintf(vfpacc1x2);
- long vrndacc1x3 = lrintf(vfpacc1x3);
- long vrndacc2x0 = lrintf(vfpacc2x0);
- long vrndacc2x1 = lrintf(vfpacc2x1);
- long vrndacc2x2 = lrintf(vfpacc2x2);
- long vrndacc2x3 = lrintf(vfpacc2x3);
-
- const long voutput_min_less_zero_point = params->fp32_scalar_lrint.output_min_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x1;
- vrndacc0x2 = XNN_UNPREDICTABLE(vrndacc0x2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x2;
- vrndacc0x3 = XNN_UNPREDICTABLE(vrndacc0x3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x3;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x1;
- vrndacc1x2 = XNN_UNPREDICTABLE(vrndacc1x2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x2;
- vrndacc1x3 = XNN_UNPREDICTABLE(vrndacc1x3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x3;
- vrndacc2x0 = XNN_UNPREDICTABLE(vrndacc2x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x0;
- vrndacc2x1 = XNN_UNPREDICTABLE(vrndacc2x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x1;
- vrndacc2x2 = XNN_UNPREDICTABLE(vrndacc2x2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x2;
- vrndacc2x3 = XNN_UNPREDICTABLE(vrndacc2x3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x3;
-
- const long voutput_max_less_zero_point = params->fp32_scalar_lrint.output_max_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x1;
- vrndacc0x2 = XNN_UNPREDICTABLE(vrndacc0x2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x2;
- vrndacc0x3 = XNN_UNPREDICTABLE(vrndacc0x3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x3;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x1;
- vrndacc1x2 = XNN_UNPREDICTABLE(vrndacc1x2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x2;
- vrndacc1x3 = XNN_UNPREDICTABLE(vrndacc1x3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x3;
- vrndacc2x0 = XNN_UNPREDICTABLE(vrndacc2x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x0;
- vrndacc2x1 = XNN_UNPREDICTABLE(vrndacc2x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x1;
- vrndacc2x2 = XNN_UNPREDICTABLE(vrndacc2x2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x2;
- vrndacc2x3 = XNN_UNPREDICTABLE(vrndacc2x3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x3;
-
- const int32_t voutput_zero_point = params->fp32_scalar_lrint.output_zero_point;
- int32_t vout0x0 = (int32_t) vrndacc0x0 + voutput_zero_point;
- int32_t vout0x1 = (int32_t) vrndacc0x1 + voutput_zero_point;
- int32_t vout0x2 = (int32_t) vrndacc0x2 + voutput_zero_point;
- int32_t vout0x3 = (int32_t) vrndacc0x3 + voutput_zero_point;
- int32_t vout1x0 = (int32_t) vrndacc1x0 + voutput_zero_point;
- int32_t vout1x1 = (int32_t) vrndacc1x1 + voutput_zero_point;
- int32_t vout1x2 = (int32_t) vrndacc1x2 + voutput_zero_point;
- int32_t vout1x3 = (int32_t) vrndacc1x3 + voutput_zero_point;
- int32_t vout2x0 = (int32_t) vrndacc2x0 + voutput_zero_point;
- int32_t vout2x1 = (int32_t) vrndacc2x1 + voutput_zero_point;
- int32_t vout2x2 = (int32_t) vrndacc2x2 + voutput_zero_point;
- int32_t vout2x3 = (int32_t) vrndacc2x3 + voutput_zero_point;
-
- if XNN_LIKELY(nc >= 4) {
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
- c0[2] = (int8_t) vout0x2;
- c0[3] = (int8_t) vout0x3;
- c1[0] = (int8_t) vout1x0;
- c1[1] = (int8_t) vout1x1;
- c1[2] = (int8_t) vout1x2;
- c1[3] = (int8_t) vout1x3;
- c2[0] = (int8_t) vout2x0;
- c2[1] = (int8_t) vout2x1;
- c2[2] = (int8_t) vout2x2;
- c2[3] = (int8_t) vout2x3;
-
- a0 = (const int8_t*) ((uintptr_t) a0 - kc);
- a1 = (const int8_t*) ((uintptr_t) a1 - kc);
- a2 = (const int8_t*) ((uintptr_t) a2 - kc);
-
- c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
- c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
- c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
-
- nc -= 4;
- } else {
- if (nc & 2) {
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
- vout0x0 = vout0x2;
- c0 += 2;
- c1[0] = (int8_t) vout1x0;
- c1[1] = (int8_t) vout1x1;
- vout1x0 = vout1x2;
- c1 += 2;
- c2[0] = (int8_t) vout2x0;
- c2[1] = (int8_t) vout2x1;
- vout2x0 = vout2x2;
- c2 += 2;
- }
- if (nc & 1) {
- c0[0] = (int8_t) vout0x0;
- c1[0] = (int8_t) vout1x0;
- c2[0] = (int8_t) vout2x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qs8-gemm/gen/3x4-minmax-fp32-scalar-magic.c b/src/qs8-gemm/gen/3x4-minmax-fp32-scalar-lrintf.c
similarity index 76%
copy from src/qs8-gemm/gen/3x4-minmax-fp32-scalar-magic.c
copy to src/qs8-gemm/gen/3x4-minmax-fp32-scalar-lrintf.c
index 8b966d9..6765b04 100644
--- a/src/qs8-gemm/gen/3x4-minmax-fp32-scalar-magic.c
+++ b/src/qs8-gemm/gen/3x4-minmax-fp32-scalar-lrintf.c
@@ -8,14 +8,13 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-
-#include <fp16.h>
+#include <math.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
-void xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_magic(
+void xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
@@ -103,7 +102,7 @@
float vfpacc2x2 = (float) vacc2x2;
float vfpacc2x3 = (float) vacc2x3;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_lrintf.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc0x2 *= vscale;
@@ -117,7 +116,7 @@
vfpacc2x2 *= vscale;
vfpacc2x3 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
@@ -131,7 +130,7 @@
vfpacc2x2 = math_max_f32(vfpacc2x2, voutput_min_less_zero_point);
vfpacc2x3 = math_max_f32(vfpacc2x3, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
@@ -145,33 +144,32 @@
vfpacc2x2 = math_min_f32(vfpacc2x2, voutput_max_less_zero_point);
vfpacc2x3 = math_min_f32(vfpacc2x3, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
- vfpacc0x0 += vmagic_bias;
- vfpacc0x1 += vmagic_bias;
- vfpacc0x2 += vmagic_bias;
- vfpacc0x3 += vmagic_bias;
- vfpacc1x0 += vmagic_bias;
- vfpacc1x1 += vmagic_bias;
- vfpacc1x2 += vmagic_bias;
- vfpacc1x3 += vmagic_bias;
- vfpacc2x0 += vmagic_bias;
- vfpacc2x1 += vmagic_bias;
- vfpacc2x2 += vmagic_bias;
- vfpacc2x3 += vmagic_bias;
+ const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
+ const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
+ const int32_t vrndacc0x2 = (int32_t) lrintf(vfpacc0x2);
+ const int32_t vrndacc0x3 = (int32_t) lrintf(vfpacc0x3);
+ const int32_t vrndacc1x0 = (int32_t) lrintf(vfpacc1x0);
+ const int32_t vrndacc1x1 = (int32_t) lrintf(vfpacc1x1);
+ const int32_t vrndacc1x2 = (int32_t) lrintf(vfpacc1x2);
+ const int32_t vrndacc1x3 = (int32_t) lrintf(vfpacc1x3);
+ const int32_t vrndacc2x0 = (int32_t) lrintf(vfpacc2x0);
+ const int32_t vrndacc2x1 = (int32_t) lrintf(vfpacc2x1);
+ const int32_t vrndacc2x2 = (int32_t) lrintf(vfpacc2x2);
+ const int32_t vrndacc2x3 = (int32_t) lrintf(vfpacc2x3);
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
- int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
- int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
- int32_t vout0x2 = (int32_t) fp32_to_bits(vfpacc0x2) - vmagic_bias_less_output_zero_point;
- int32_t vout0x3 = (int32_t) fp32_to_bits(vfpacc0x3) - vmagic_bias_less_output_zero_point;
- int32_t vout1x0 = (int32_t) fp32_to_bits(vfpacc1x0) - vmagic_bias_less_output_zero_point;
- int32_t vout1x1 = (int32_t) fp32_to_bits(vfpacc1x1) - vmagic_bias_less_output_zero_point;
- int32_t vout1x2 = (int32_t) fp32_to_bits(vfpacc1x2) - vmagic_bias_less_output_zero_point;
- int32_t vout1x3 = (int32_t) fp32_to_bits(vfpacc1x3) - vmagic_bias_less_output_zero_point;
- int32_t vout2x0 = (int32_t) fp32_to_bits(vfpacc2x0) - vmagic_bias_less_output_zero_point;
- int32_t vout2x1 = (int32_t) fp32_to_bits(vfpacc2x1) - vmagic_bias_less_output_zero_point;
- int32_t vout2x2 = (int32_t) fp32_to_bits(vfpacc2x2) - vmagic_bias_less_output_zero_point;
- int32_t vout2x3 = (int32_t) fp32_to_bits(vfpacc2x3) - vmagic_bias_less_output_zero_point;
+ const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
+ int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
+ int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
+ int32_t vout0x2 = vrndacc0x2 + voutput_zero_point;
+ int32_t vout0x3 = vrndacc0x3 + voutput_zero_point;
+ int32_t vout1x0 = vrndacc1x0 + voutput_zero_point;
+ int32_t vout1x1 = vrndacc1x1 + voutput_zero_point;
+ int32_t vout1x2 = vrndacc1x2 + voutput_zero_point;
+ int32_t vout1x3 = vrndacc1x3 + voutput_zero_point;
+ int32_t vout2x0 = vrndacc2x0 + voutput_zero_point;
+ int32_t vout2x1 = vrndacc2x1 + voutput_zero_point;
+ int32_t vout2x2 = vrndacc2x2 + voutput_zero_point;
+ int32_t vout2x3 = vrndacc2x3 + voutput_zero_point;
if XNN_LIKELY(nc >= 4) {
c0[0] = (int8_t) vout0x0;
diff --git a/src/qs8-gemm/gen/3x4-minmax-rndnu-scalar.c b/src/qs8-gemm/gen/3x4-minmax-rndnu-scalar.c
deleted file mode 100644
index 01acab8..0000000
--- a/src/qs8-gemm/gen/3x4-minmax-rndnu-scalar.c
+++ /dev/null
@@ -1,209 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-gemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qs8_gemm_minmax_rndnu_ukernel_3x4__scalar(
- size_t mr,
- size_t nc,
- size_t kc,
- const int8_t* restrict a,
- size_t a_stride,
- const void* restrict w,
- int8_t* restrict c,
- size_t cm_stride,
- size_t cn_stride,
- const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 3);
- assert(nc != 0);
- assert(kc != 0);
-
- const int8_t* a0 = a;
- int8_t* c0 = c;
- const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
- int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
- if XNN_UNPREDICTABLE(mr < 2) {
- a1 = a0;
- c1 = c0;
- }
- const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
- int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
- if XNN_UNPREDICTABLE(mr <= 2) {
- a2 = a1;
- c2 = c1;
- }
-
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- int32_t vacc0x2 = ((const int32_t*) w)[2];
- int32_t vacc0x3 = ((const int32_t*) w)[3];
- int32_t vacc1x0 = vacc0x0;
- int32_t vacc1x1 = vacc0x1;
- int32_t vacc1x2 = vacc0x2;
- int32_t vacc1x3 = vacc0x3;
- int32_t vacc2x0 = vacc0x0;
- int32_t vacc2x1 = vacc0x1;
- int32_t vacc2x2 = vacc0x2;
- int32_t vacc2x3 = vacc0x3;
- w = (const void*) ((const int32_t*) w + 4);
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) *a0++;
- const int32_t va1 = (int32_t) *a1++;
- const int32_t va2 = (int32_t) *a2++;
-
- const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
- const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
- const int32_t vb2 = (int32_t) ((const int8_t*) w)[2];
- const int32_t vb3 = (int32_t) ((const int8_t*) w)[3];
- w = (const void*) ((const int8_t*) w + 4);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
- vacc0x2 += va0 * vb2;
- vacc0x3 += va0 * vb3;
- vacc1x0 += va1 * vb0;
- vacc1x1 += va1 * vb1;
- vacc1x2 += va1 * vb2;
- vacc1x3 += va1 * vb3;
- vacc2x0 += va2 * vb0;
- vacc2x1 += va2 * vb1;
- vacc2x2 += va2 * vb2;
- vacc2x3 += va2 * vb3;
-
- k -= sizeof(int8_t);
- } while (k != 0);
-
- const int32_t vmultiplier = params->rndnu_scalar.multiplier;
- const int64_t vproduct0x0 = (int64_t) vacc0x0 * (int64_t) vmultiplier;
- const int64_t vproduct0x1 = (int64_t) vacc0x1 * (int64_t) vmultiplier;
- const int64_t vproduct0x2 = (int64_t) vacc0x2 * (int64_t) vmultiplier;
- const int64_t vproduct0x3 = (int64_t) vacc0x3 * (int64_t) vmultiplier;
- const int64_t vproduct1x0 = (int64_t) vacc1x0 * (int64_t) vmultiplier;
- const int64_t vproduct1x1 = (int64_t) vacc1x1 * (int64_t) vmultiplier;
- const int64_t vproduct1x2 = (int64_t) vacc1x2 * (int64_t) vmultiplier;
- const int64_t vproduct1x3 = (int64_t) vacc1x3 * (int64_t) vmultiplier;
- const int64_t vproduct2x0 = (int64_t) vacc2x0 * (int64_t) vmultiplier;
- const int64_t vproduct2x1 = (int64_t) vacc2x1 * (int64_t) vmultiplier;
- const int64_t vproduct2x2 = (int64_t) vacc2x2 * (int64_t) vmultiplier;
- const int64_t vproduct2x3 = (int64_t) vacc2x3 * (int64_t) vmultiplier;
-
- const uint32_t shift = params->rndnu_scalar.shift;
- const int64_t rounding = params->rndnu_scalar.rounding;
- int32_t vout0x0 = (int32_t) asr_s64(vproduct0x0 + rounding, shift);
- int32_t vout0x1 = (int32_t) asr_s64(vproduct0x1 + rounding, shift);
- int32_t vout0x2 = (int32_t) asr_s64(vproduct0x2 + rounding, shift);
- int32_t vout0x3 = (int32_t) asr_s64(vproduct0x3 + rounding, shift);
- int32_t vout1x0 = (int32_t) asr_s64(vproduct1x0 + rounding, shift);
- int32_t vout1x1 = (int32_t) asr_s64(vproduct1x1 + rounding, shift);
- int32_t vout1x2 = (int32_t) asr_s64(vproduct1x2 + rounding, shift);
- int32_t vout1x3 = (int32_t) asr_s64(vproduct1x3 + rounding, shift);
- int32_t vout2x0 = (int32_t) asr_s64(vproduct2x0 + rounding, shift);
- int32_t vout2x1 = (int32_t) asr_s64(vproduct2x1 + rounding, shift);
- int32_t vout2x2 = (int32_t) asr_s64(vproduct2x2 + rounding, shift);
- int32_t vout2x3 = (int32_t) asr_s64(vproduct2x3 + rounding, shift);
-
- const int32_t voutput_min_less_zero_point = params->rndnu_scalar.output_min_less_zero_point;
- vout0x0 = math_max_s32(vout0x0, voutput_min_less_zero_point);
- vout0x1 = math_max_s32(vout0x1, voutput_min_less_zero_point);
- vout0x2 = math_max_s32(vout0x2, voutput_min_less_zero_point);
- vout0x3 = math_max_s32(vout0x3, voutput_min_less_zero_point);
- vout1x0 = math_max_s32(vout1x0, voutput_min_less_zero_point);
- vout1x1 = math_max_s32(vout1x1, voutput_min_less_zero_point);
- vout1x2 = math_max_s32(vout1x2, voutput_min_less_zero_point);
- vout1x3 = math_max_s32(vout1x3, voutput_min_less_zero_point);
- vout2x0 = math_max_s32(vout2x0, voutput_min_less_zero_point);
- vout2x1 = math_max_s32(vout2x1, voutput_min_less_zero_point);
- vout2x2 = math_max_s32(vout2x2, voutput_min_less_zero_point);
- vout2x3 = math_max_s32(vout2x3, voutput_min_less_zero_point);
-
- const int32_t voutput_max_less_zero_point = params->rndnu_scalar.output_max_less_zero_point;
- vout0x0 = math_min_s32(vout0x0, voutput_max_less_zero_point);
- vout0x1 = math_min_s32(vout0x1, voutput_max_less_zero_point);
- vout0x2 = math_min_s32(vout0x2, voutput_max_less_zero_point);
- vout0x3 = math_min_s32(vout0x3, voutput_max_less_zero_point);
- vout1x0 = math_min_s32(vout1x0, voutput_max_less_zero_point);
- vout1x1 = math_min_s32(vout1x1, voutput_max_less_zero_point);
- vout1x2 = math_min_s32(vout1x2, voutput_max_less_zero_point);
- vout1x3 = math_min_s32(vout1x3, voutput_max_less_zero_point);
- vout2x0 = math_min_s32(vout2x0, voutput_max_less_zero_point);
- vout2x1 = math_min_s32(vout2x1, voutput_max_less_zero_point);
- vout2x2 = math_min_s32(vout2x2, voutput_max_less_zero_point);
- vout2x3 = math_min_s32(vout2x3, voutput_max_less_zero_point);
-
- const int32_t voutput_zero_point = params->rndnu_scalar.output_zero_point;
- vout0x0 += voutput_zero_point;
- vout0x1 += voutput_zero_point;
- vout0x2 += voutput_zero_point;
- vout0x3 += voutput_zero_point;
- vout1x0 += voutput_zero_point;
- vout1x1 += voutput_zero_point;
- vout1x2 += voutput_zero_point;
- vout1x3 += voutput_zero_point;
- vout2x0 += voutput_zero_point;
- vout2x1 += voutput_zero_point;
- vout2x2 += voutput_zero_point;
- vout2x3 += voutput_zero_point;
-
- if XNN_LIKELY(nc >= 4) {
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
- c0[2] = (int8_t) vout0x2;
- c0[3] = (int8_t) vout0x3;
- c1[0] = (int8_t) vout1x0;
- c1[1] = (int8_t) vout1x1;
- c1[2] = (int8_t) vout1x2;
- c1[3] = (int8_t) vout1x3;
- c2[0] = (int8_t) vout2x0;
- c2[1] = (int8_t) vout2x1;
- c2[2] = (int8_t) vout2x2;
- c2[3] = (int8_t) vout2x3;
-
- a0 = (const int8_t*) ((uintptr_t) a0 - kc);
- a1 = (const int8_t*) ((uintptr_t) a1 - kc);
- a2 = (const int8_t*) ((uintptr_t) a2 - kc);
-
- c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
- c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
- c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
-
- nc -= 4;
- } else {
- if (nc & 2) {
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
- vout0x0 = vout0x2;
- c0 += 2;
- c1[0] = (int8_t) vout1x0;
- c1[1] = (int8_t) vout1x1;
- vout1x0 = vout1x2;
- c1 += 2;
- c2[0] = (int8_t) vout2x0;
- c2[1] = (int8_t) vout2x1;
- vout2x0 = vout2x2;
- c2 += 2;
- }
- if (nc & 1) {
- c0[0] = (int8_t) vout0x0;
- c1[0] = (int8_t) vout1x0;
- c2[0] = (int8_t) vout2x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qs8-gemm/gen/4x2-minmax-fp32-scalar-magic.c b/src/qs8-gemm/gen/4x2-minmax-fp32-scalar-fmagic.c
similarity index 94%
rename from src/qs8-gemm/gen/4x2-minmax-fp32-scalar-magic.c
rename to src/qs8-gemm/gen/4x2-minmax-fp32-scalar-fmagic.c
index dd70e6b..27f14d2 100644
--- a/src/qs8-gemm/gen/4x2-minmax-fp32-scalar-magic.c
+++ b/src/qs8-gemm/gen/4x2-minmax-fp32-scalar-fmagic.c
@@ -15,7 +15,7 @@
#include <xnnpack/gemm.h>
-void xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_magic(
+void xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic(
size_t mr,
size_t nc,
size_t kc,
@@ -96,7 +96,7 @@
float vfpacc3x0 = (float) vacc3x0;
float vfpacc3x1 = (float) vacc3x1;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_fmagic.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc1x0 *= vscale;
@@ -106,7 +106,7 @@
vfpacc3x0 *= vscale;
vfpacc3x1 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point);
@@ -116,7 +116,7 @@
vfpacc3x0 = math_max_f32(vfpacc3x0, voutput_min_less_zero_point);
vfpacc3x1 = math_max_f32(vfpacc3x1, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point);
@@ -126,7 +126,7 @@
vfpacc3x0 = math_min_f32(vfpacc3x0, voutput_max_less_zero_point);
vfpacc3x1 = math_min_f32(vfpacc3x1, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
+ const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc1x0 += vmagic_bias;
@@ -136,7 +136,7 @@
vfpacc3x0 += vmagic_bias;
vfpacc3x1 += vmagic_bias;
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
+ const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
int32_t vout1x0 = (int32_t) fp32_to_bits(vfpacc1x0) - vmagic_bias_less_output_zero_point;
diff --git a/src/qs8-gemm/gen/4x2-minmax-fp32-scalar-lrint.c b/src/qs8-gemm/gen/4x2-minmax-fp32-scalar-lrint.c
deleted file mode 100644
index b4cfd80..0000000
--- a/src/qs8-gemm/gen/4x2-minmax-fp32-scalar-lrint.c
+++ /dev/null
@@ -1,179 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-gemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-#include <math.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint(
- size_t mr,
- size_t nc,
- size_t kc,
- const int8_t* restrict a,
- size_t a_stride,
- const void* restrict w,
- int8_t* restrict c,
- size_t cm_stride,
- size_t cn_stride,
- const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 4);
- assert(nc != 0);
- assert(kc != 0);
-
- const int8_t* a0 = a;
- int8_t* c0 = c;
- const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
- int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
- if XNN_UNPREDICTABLE(mr < 2) {
- a1 = a0;
- c1 = c0;
- }
- const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
- int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
- if XNN_UNPREDICTABLE(mr <= 2) {
- a2 = a1;
- c2 = c1;
- }
- const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
- int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
- if XNN_UNPREDICTABLE(mr != 4) {
- a3 = a2;
- c3 = c2;
- }
-
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- int32_t vacc1x0 = vacc0x0;
- int32_t vacc1x1 = vacc0x1;
- int32_t vacc2x0 = vacc0x0;
- int32_t vacc2x1 = vacc0x1;
- int32_t vacc3x0 = vacc0x0;
- int32_t vacc3x1 = vacc0x1;
- w = (const void*) ((const int32_t*) w + 2);
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) *a0++;
- const int32_t va1 = (int32_t) *a1++;
- const int32_t va2 = (int32_t) *a2++;
- const int32_t va3 = (int32_t) *a3++;
-
- const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
- const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
- w = (const void*) ((const int8_t*) w + 2);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
- vacc1x0 += va1 * vb0;
- vacc1x1 += va1 * vb1;
- vacc2x0 += va2 * vb0;
- vacc2x1 += va2 * vb1;
- vacc3x0 += va3 * vb0;
- vacc3x1 += va3 * vb1;
-
- k -= sizeof(int8_t);
- } while (k != 0);
-
- float vfpacc0x0 = (float) vacc0x0;
- float vfpacc0x1 = (float) vacc0x1;
- float vfpacc1x0 = (float) vacc1x0;
- float vfpacc1x1 = (float) vacc1x1;
- float vfpacc2x0 = (float) vacc2x0;
- float vfpacc2x1 = (float) vacc2x1;
- float vfpacc3x0 = (float) vacc3x0;
- float vfpacc3x1 = (float) vacc3x1;
-
- const float vscale = params->fp32_scalar_lrint.scale;
- vfpacc0x0 *= vscale;
- vfpacc0x1 *= vscale;
- vfpacc1x0 *= vscale;
- vfpacc1x1 *= vscale;
- vfpacc2x0 *= vscale;
- vfpacc2x1 *= vscale;
- vfpacc3x0 *= vscale;
- vfpacc3x1 *= vscale;
-
- long vrndacc0x0 = lrintf(vfpacc0x0);
- long vrndacc0x1 = lrintf(vfpacc0x1);
- long vrndacc1x0 = lrintf(vfpacc1x0);
- long vrndacc1x1 = lrintf(vfpacc1x1);
- long vrndacc2x0 = lrintf(vfpacc2x0);
- long vrndacc2x1 = lrintf(vfpacc2x1);
- long vrndacc3x0 = lrintf(vfpacc3x0);
- long vrndacc3x1 = lrintf(vfpacc3x1);
-
- const long voutput_min_less_zero_point = params->fp32_scalar_lrint.output_min_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x1;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x1;
- vrndacc2x0 = XNN_UNPREDICTABLE(vrndacc2x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x0;
- vrndacc2x1 = XNN_UNPREDICTABLE(vrndacc2x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x1;
- vrndacc3x0 = XNN_UNPREDICTABLE(vrndacc3x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc3x0;
- vrndacc3x1 = XNN_UNPREDICTABLE(vrndacc3x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc3x1;
-
- const long voutput_max_less_zero_point = params->fp32_scalar_lrint.output_max_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x1;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x1;
- vrndacc2x0 = XNN_UNPREDICTABLE(vrndacc2x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x0;
- vrndacc2x1 = XNN_UNPREDICTABLE(vrndacc2x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x1;
- vrndacc3x0 = XNN_UNPREDICTABLE(vrndacc3x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc3x0;
- vrndacc3x1 = XNN_UNPREDICTABLE(vrndacc3x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc3x1;
-
- const int32_t voutput_zero_point = params->fp32_scalar_lrint.output_zero_point;
- int32_t vout0x0 = (int32_t) vrndacc0x0 + voutput_zero_point;
- int32_t vout0x1 = (int32_t) vrndacc0x1 + voutput_zero_point;
- int32_t vout1x0 = (int32_t) vrndacc1x0 + voutput_zero_point;
- int32_t vout1x1 = (int32_t) vrndacc1x1 + voutput_zero_point;
- int32_t vout2x0 = (int32_t) vrndacc2x0 + voutput_zero_point;
- int32_t vout2x1 = (int32_t) vrndacc2x1 + voutput_zero_point;
- int32_t vout3x0 = (int32_t) vrndacc3x0 + voutput_zero_point;
- int32_t vout3x1 = (int32_t) vrndacc3x1 + voutput_zero_point;
-
- if XNN_LIKELY(nc >= 2) {
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
- c1[0] = (int8_t) vout1x0;
- c1[1] = (int8_t) vout1x1;
- c2[0] = (int8_t) vout2x0;
- c2[1] = (int8_t) vout2x1;
- c3[0] = (int8_t) vout3x0;
- c3[1] = (int8_t) vout3x1;
-
- a0 = (const int8_t*) ((uintptr_t) a0 - kc);
- a1 = (const int8_t*) ((uintptr_t) a1 - kc);
- a2 = (const int8_t*) ((uintptr_t) a2 - kc);
- a3 = (const int8_t*) ((uintptr_t) a3 - kc);
-
- c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
- c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
- c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
- c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
-
- nc -= 2;
- } else {
- if (nc & 1) {
- c0[0] = (int8_t) vout0x0;
- c1[0] = (int8_t) vout1x0;
- c2[0] = (int8_t) vout2x0;
- c3[0] = (int8_t) vout3x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qs8-gemm/gen/4x2-minmax-fp32-scalar-magic.c b/src/qs8-gemm/gen/4x2-minmax-fp32-scalar-lrintf.c
similarity index 77%
copy from src/qs8-gemm/gen/4x2-minmax-fp32-scalar-magic.c
copy to src/qs8-gemm/gen/4x2-minmax-fp32-scalar-lrintf.c
index dd70e6b..d0f64fe 100644
--- a/src/qs8-gemm/gen/4x2-minmax-fp32-scalar-magic.c
+++ b/src/qs8-gemm/gen/4x2-minmax-fp32-scalar-lrintf.c
@@ -8,14 +8,13 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-
-#include <fp16.h>
+#include <math.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
-void xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_magic(
+void xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
@@ -96,7 +95,7 @@
float vfpacc3x0 = (float) vacc3x0;
float vfpacc3x1 = (float) vacc3x1;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_lrintf.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc1x0 *= vscale;
@@ -106,7 +105,7 @@
vfpacc3x0 *= vscale;
vfpacc3x1 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point);
@@ -116,7 +115,7 @@
vfpacc3x0 = math_max_f32(vfpacc3x0, voutput_min_less_zero_point);
vfpacc3x1 = math_max_f32(vfpacc3x1, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point);
@@ -126,25 +125,24 @@
vfpacc3x0 = math_min_f32(vfpacc3x0, voutput_max_less_zero_point);
vfpacc3x1 = math_min_f32(vfpacc3x1, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
- vfpacc0x0 += vmagic_bias;
- vfpacc0x1 += vmagic_bias;
- vfpacc1x0 += vmagic_bias;
- vfpacc1x1 += vmagic_bias;
- vfpacc2x0 += vmagic_bias;
- vfpacc2x1 += vmagic_bias;
- vfpacc3x0 += vmagic_bias;
- vfpacc3x1 += vmagic_bias;
+ const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
+ const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
+ const int32_t vrndacc1x0 = (int32_t) lrintf(vfpacc1x0);
+ const int32_t vrndacc1x1 = (int32_t) lrintf(vfpacc1x1);
+ const int32_t vrndacc2x0 = (int32_t) lrintf(vfpacc2x0);
+ const int32_t vrndacc2x1 = (int32_t) lrintf(vfpacc2x1);
+ const int32_t vrndacc3x0 = (int32_t) lrintf(vfpacc3x0);
+ const int32_t vrndacc3x1 = (int32_t) lrintf(vfpacc3x1);
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
- int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
- int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
- int32_t vout1x0 = (int32_t) fp32_to_bits(vfpacc1x0) - vmagic_bias_less_output_zero_point;
- int32_t vout1x1 = (int32_t) fp32_to_bits(vfpacc1x1) - vmagic_bias_less_output_zero_point;
- int32_t vout2x0 = (int32_t) fp32_to_bits(vfpacc2x0) - vmagic_bias_less_output_zero_point;
- int32_t vout2x1 = (int32_t) fp32_to_bits(vfpacc2x1) - vmagic_bias_less_output_zero_point;
- int32_t vout3x0 = (int32_t) fp32_to_bits(vfpacc3x0) - vmagic_bias_less_output_zero_point;
- int32_t vout3x1 = (int32_t) fp32_to_bits(vfpacc3x1) - vmagic_bias_less_output_zero_point;
+ const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
+ int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
+ int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
+ int32_t vout1x0 = vrndacc1x0 + voutput_zero_point;
+ int32_t vout1x1 = vrndacc1x1 + voutput_zero_point;
+ int32_t vout2x0 = vrndacc2x0 + voutput_zero_point;
+ int32_t vout2x1 = vrndacc2x1 + voutput_zero_point;
+ int32_t vout3x0 = vrndacc3x0 + voutput_zero_point;
+ int32_t vout3x1 = vrndacc3x1 + voutput_zero_point;
if XNN_LIKELY(nc >= 2) {
c0[0] = (int8_t) vout0x0;
diff --git a/src/qs8-gemm/gen/4x2-minmax-rndnu-scalar.c b/src/qs8-gemm/gen/4x2-minmax-rndnu-scalar.c
deleted file mode 100644
index 13c2fb6..0000000
--- a/src/qs8-gemm/gen/4x2-minmax-rndnu-scalar.c
+++ /dev/null
@@ -1,171 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-gemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qs8_gemm_minmax_rndnu_ukernel_4x2__scalar(
- size_t mr,
- size_t nc,
- size_t kc,
- const int8_t* restrict a,
- size_t a_stride,
- const void* restrict w,
- int8_t* restrict c,
- size_t cm_stride,
- size_t cn_stride,
- const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 4);
- assert(nc != 0);
- assert(kc != 0);
-
- const int8_t* a0 = a;
- int8_t* c0 = c;
- const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
- int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
- if XNN_UNPREDICTABLE(mr < 2) {
- a1 = a0;
- c1 = c0;
- }
- const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
- int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
- if XNN_UNPREDICTABLE(mr <= 2) {
- a2 = a1;
- c2 = c1;
- }
- const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
- int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
- if XNN_UNPREDICTABLE(mr != 4) {
- a3 = a2;
- c3 = c2;
- }
-
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- int32_t vacc1x0 = vacc0x0;
- int32_t vacc1x1 = vacc0x1;
- int32_t vacc2x0 = vacc0x0;
- int32_t vacc2x1 = vacc0x1;
- int32_t vacc3x0 = vacc0x0;
- int32_t vacc3x1 = vacc0x1;
- w = (const void*) ((const int32_t*) w + 2);
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) *a0++;
- const int32_t va1 = (int32_t) *a1++;
- const int32_t va2 = (int32_t) *a2++;
- const int32_t va3 = (int32_t) *a3++;
-
- const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
- const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
- w = (const void*) ((const int8_t*) w + 2);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
- vacc1x0 += va1 * vb0;
- vacc1x1 += va1 * vb1;
- vacc2x0 += va2 * vb0;
- vacc2x1 += va2 * vb1;
- vacc3x0 += va3 * vb0;
- vacc3x1 += va3 * vb1;
-
- k -= sizeof(int8_t);
- } while (k != 0);
-
- const int32_t vmultiplier = params->rndnu_scalar.multiplier;
- const int64_t vproduct0x0 = (int64_t) vacc0x0 * (int64_t) vmultiplier;
- const int64_t vproduct0x1 = (int64_t) vacc0x1 * (int64_t) vmultiplier;
- const int64_t vproduct1x0 = (int64_t) vacc1x0 * (int64_t) vmultiplier;
- const int64_t vproduct1x1 = (int64_t) vacc1x1 * (int64_t) vmultiplier;
- const int64_t vproduct2x0 = (int64_t) vacc2x0 * (int64_t) vmultiplier;
- const int64_t vproduct2x1 = (int64_t) vacc2x1 * (int64_t) vmultiplier;
- const int64_t vproduct3x0 = (int64_t) vacc3x0 * (int64_t) vmultiplier;
- const int64_t vproduct3x1 = (int64_t) vacc3x1 * (int64_t) vmultiplier;
-
- const uint32_t shift = params->rndnu_scalar.shift;
- const int64_t rounding = params->rndnu_scalar.rounding;
- int32_t vout0x0 = (int32_t) asr_s64(vproduct0x0 + rounding, shift);
- int32_t vout0x1 = (int32_t) asr_s64(vproduct0x1 + rounding, shift);
- int32_t vout1x0 = (int32_t) asr_s64(vproduct1x0 + rounding, shift);
- int32_t vout1x1 = (int32_t) asr_s64(vproduct1x1 + rounding, shift);
- int32_t vout2x0 = (int32_t) asr_s64(vproduct2x0 + rounding, shift);
- int32_t vout2x1 = (int32_t) asr_s64(vproduct2x1 + rounding, shift);
- int32_t vout3x0 = (int32_t) asr_s64(vproduct3x0 + rounding, shift);
- int32_t vout3x1 = (int32_t) asr_s64(vproduct3x1 + rounding, shift);
-
- const int32_t voutput_min_less_zero_point = params->rndnu_scalar.output_min_less_zero_point;
- vout0x0 = math_max_s32(vout0x0, voutput_min_less_zero_point);
- vout0x1 = math_max_s32(vout0x1, voutput_min_less_zero_point);
- vout1x0 = math_max_s32(vout1x0, voutput_min_less_zero_point);
- vout1x1 = math_max_s32(vout1x1, voutput_min_less_zero_point);
- vout2x0 = math_max_s32(vout2x0, voutput_min_less_zero_point);
- vout2x1 = math_max_s32(vout2x1, voutput_min_less_zero_point);
- vout3x0 = math_max_s32(vout3x0, voutput_min_less_zero_point);
- vout3x1 = math_max_s32(vout3x1, voutput_min_less_zero_point);
-
- const int32_t voutput_max_less_zero_point = params->rndnu_scalar.output_max_less_zero_point;
- vout0x0 = math_min_s32(vout0x0, voutput_max_less_zero_point);
- vout0x1 = math_min_s32(vout0x1, voutput_max_less_zero_point);
- vout1x0 = math_min_s32(vout1x0, voutput_max_less_zero_point);
- vout1x1 = math_min_s32(vout1x1, voutput_max_less_zero_point);
- vout2x0 = math_min_s32(vout2x0, voutput_max_less_zero_point);
- vout2x1 = math_min_s32(vout2x1, voutput_max_less_zero_point);
- vout3x0 = math_min_s32(vout3x0, voutput_max_less_zero_point);
- vout3x1 = math_min_s32(vout3x1, voutput_max_less_zero_point);
-
- const int32_t voutput_zero_point = params->rndnu_scalar.output_zero_point;
- vout0x0 += voutput_zero_point;
- vout0x1 += voutput_zero_point;
- vout1x0 += voutput_zero_point;
- vout1x1 += voutput_zero_point;
- vout2x0 += voutput_zero_point;
- vout2x1 += voutput_zero_point;
- vout3x0 += voutput_zero_point;
- vout3x1 += voutput_zero_point;
-
- if XNN_LIKELY(nc >= 2) {
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
- c1[0] = (int8_t) vout1x0;
- c1[1] = (int8_t) vout1x1;
- c2[0] = (int8_t) vout2x0;
- c2[1] = (int8_t) vout2x1;
- c3[0] = (int8_t) vout3x0;
- c3[1] = (int8_t) vout3x1;
-
- a0 = (const int8_t*) ((uintptr_t) a0 - kc);
- a1 = (const int8_t*) ((uintptr_t) a1 - kc);
- a2 = (const int8_t*) ((uintptr_t) a2 - kc);
- a3 = (const int8_t*) ((uintptr_t) a3 - kc);
-
- c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
- c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
- c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
- c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
-
- nc -= 2;
- } else {
- if (nc & 1) {
- c0[0] = (int8_t) vout0x0;
- c1[0] = (int8_t) vout1x0;
- c2[0] = (int8_t) vout2x0;
- c3[0] = (int8_t) vout3x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qs8-gemm/gen/4x4-minmax-fp32-scalar-magic.c b/src/qs8-gemm/gen/4x4-minmax-fp32-scalar-fmagic.c
similarity index 96%
rename from src/qs8-gemm/gen/4x4-minmax-fp32-scalar-magic.c
rename to src/qs8-gemm/gen/4x4-minmax-fp32-scalar-fmagic.c
index f2c3231..b47c167 100644
--- a/src/qs8-gemm/gen/4x4-minmax-fp32-scalar-magic.c
+++ b/src/qs8-gemm/gen/4x4-minmax-fp32-scalar-fmagic.c
@@ -15,7 +15,7 @@
#include <xnnpack/gemm.h>
-void xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_magic(
+void xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic(
size_t mr,
size_t nc,
size_t kc,
@@ -122,7 +122,7 @@
float vfpacc3x2 = (float) vacc3x2;
float vfpacc3x3 = (float) vacc3x3;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_fmagic.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc0x2 *= vscale;
@@ -140,7 +140,7 @@
vfpacc3x2 *= vscale;
vfpacc3x3 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
@@ -158,7 +158,7 @@
vfpacc3x2 = math_max_f32(vfpacc3x2, voutput_min_less_zero_point);
vfpacc3x3 = math_max_f32(vfpacc3x3, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
@@ -176,7 +176,7 @@
vfpacc3x2 = math_min_f32(vfpacc3x2, voutput_max_less_zero_point);
vfpacc3x3 = math_min_f32(vfpacc3x3, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
+ const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc0x2 += vmagic_bias;
@@ -194,7 +194,7 @@
vfpacc3x2 += vmagic_bias;
vfpacc3x3 += vmagic_bias;
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
+ const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
int32_t vout0x2 = (int32_t) fp32_to_bits(vfpacc0x2) - vmagic_bias_less_output_zero_point;
diff --git a/src/qs8-gemm/gen/4x4-minmax-fp32-scalar-lrint.c b/src/qs8-gemm/gen/4x4-minmax-fp32-scalar-lrint.c
deleted file mode 100644
index 3744d2b..0000000
--- a/src/qs8-gemm/gen/4x4-minmax-fp32-scalar-lrint.c
+++ /dev/null
@@ -1,271 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-gemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-#include <math.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint(
- size_t mr,
- size_t nc,
- size_t kc,
- const int8_t* restrict a,
- size_t a_stride,
- const void* restrict w,
- int8_t* restrict c,
- size_t cm_stride,
- size_t cn_stride,
- const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 4);
- assert(nc != 0);
- assert(kc != 0);
-
- const int8_t* a0 = a;
- int8_t* c0 = c;
- const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
- int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
- if XNN_UNPREDICTABLE(mr < 2) {
- a1 = a0;
- c1 = c0;
- }
- const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
- int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
- if XNN_UNPREDICTABLE(mr <= 2) {
- a2 = a1;
- c2 = c1;
- }
- const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
- int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
- if XNN_UNPREDICTABLE(mr != 4) {
- a3 = a2;
- c3 = c2;
- }
-
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- int32_t vacc0x2 = ((const int32_t*) w)[2];
- int32_t vacc0x3 = ((const int32_t*) w)[3];
- int32_t vacc1x0 = vacc0x0;
- int32_t vacc1x1 = vacc0x1;
- int32_t vacc1x2 = vacc0x2;
- int32_t vacc1x3 = vacc0x3;
- int32_t vacc2x0 = vacc0x0;
- int32_t vacc2x1 = vacc0x1;
- int32_t vacc2x2 = vacc0x2;
- int32_t vacc2x3 = vacc0x3;
- int32_t vacc3x0 = vacc0x0;
- int32_t vacc3x1 = vacc0x1;
- int32_t vacc3x2 = vacc0x2;
- int32_t vacc3x3 = vacc0x3;
- w = (const void*) ((const int32_t*) w + 4);
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) *a0++;
- const int32_t va1 = (int32_t) *a1++;
- const int32_t va2 = (int32_t) *a2++;
- const int32_t va3 = (int32_t) *a3++;
-
- const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
- const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
- const int32_t vb2 = (int32_t) ((const int8_t*) w)[2];
- const int32_t vb3 = (int32_t) ((const int8_t*) w)[3];
- w = (const void*) ((const int8_t*) w + 4);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
- vacc0x2 += va0 * vb2;
- vacc0x3 += va0 * vb3;
- vacc1x0 += va1 * vb0;
- vacc1x1 += va1 * vb1;
- vacc1x2 += va1 * vb2;
- vacc1x3 += va1 * vb3;
- vacc2x0 += va2 * vb0;
- vacc2x1 += va2 * vb1;
- vacc2x2 += va2 * vb2;
- vacc2x3 += va2 * vb3;
- vacc3x0 += va3 * vb0;
- vacc3x1 += va3 * vb1;
- vacc3x2 += va3 * vb2;
- vacc3x3 += va3 * vb3;
-
- k -= sizeof(int8_t);
- } while (k != 0);
-
- float vfpacc0x0 = (float) vacc0x0;
- float vfpacc0x1 = (float) vacc0x1;
- float vfpacc0x2 = (float) vacc0x2;
- float vfpacc0x3 = (float) vacc0x3;
- float vfpacc1x0 = (float) vacc1x0;
- float vfpacc1x1 = (float) vacc1x1;
- float vfpacc1x2 = (float) vacc1x2;
- float vfpacc1x3 = (float) vacc1x3;
- float vfpacc2x0 = (float) vacc2x0;
- float vfpacc2x1 = (float) vacc2x1;
- float vfpacc2x2 = (float) vacc2x2;
- float vfpacc2x3 = (float) vacc2x3;
- float vfpacc3x0 = (float) vacc3x0;
- float vfpacc3x1 = (float) vacc3x1;
- float vfpacc3x2 = (float) vacc3x2;
- float vfpacc3x3 = (float) vacc3x3;
-
- const float vscale = params->fp32_scalar_lrint.scale;
- vfpacc0x0 *= vscale;
- vfpacc0x1 *= vscale;
- vfpacc0x2 *= vscale;
- vfpacc0x3 *= vscale;
- vfpacc1x0 *= vscale;
- vfpacc1x1 *= vscale;
- vfpacc1x2 *= vscale;
- vfpacc1x3 *= vscale;
- vfpacc2x0 *= vscale;
- vfpacc2x1 *= vscale;
- vfpacc2x2 *= vscale;
- vfpacc2x3 *= vscale;
- vfpacc3x0 *= vscale;
- vfpacc3x1 *= vscale;
- vfpacc3x2 *= vscale;
- vfpacc3x3 *= vscale;
-
- long vrndacc0x0 = lrintf(vfpacc0x0);
- long vrndacc0x1 = lrintf(vfpacc0x1);
- long vrndacc0x2 = lrintf(vfpacc0x2);
- long vrndacc0x3 = lrintf(vfpacc0x3);
- long vrndacc1x0 = lrintf(vfpacc1x0);
- long vrndacc1x1 = lrintf(vfpacc1x1);
- long vrndacc1x2 = lrintf(vfpacc1x2);
- long vrndacc1x3 = lrintf(vfpacc1x3);
- long vrndacc2x0 = lrintf(vfpacc2x0);
- long vrndacc2x1 = lrintf(vfpacc2x1);
- long vrndacc2x2 = lrintf(vfpacc2x2);
- long vrndacc2x3 = lrintf(vfpacc2x3);
- long vrndacc3x0 = lrintf(vfpacc3x0);
- long vrndacc3x1 = lrintf(vfpacc3x1);
- long vrndacc3x2 = lrintf(vfpacc3x2);
- long vrndacc3x3 = lrintf(vfpacc3x3);
-
- const long voutput_min_less_zero_point = params->fp32_scalar_lrint.output_min_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x1;
- vrndacc0x2 = XNN_UNPREDICTABLE(vrndacc0x2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x2;
- vrndacc0x3 = XNN_UNPREDICTABLE(vrndacc0x3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x3;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x1;
- vrndacc1x2 = XNN_UNPREDICTABLE(vrndacc1x2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x2;
- vrndacc1x3 = XNN_UNPREDICTABLE(vrndacc1x3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x3;
- vrndacc2x0 = XNN_UNPREDICTABLE(vrndacc2x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x0;
- vrndacc2x1 = XNN_UNPREDICTABLE(vrndacc2x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x1;
- vrndacc2x2 = XNN_UNPREDICTABLE(vrndacc2x2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x2;
- vrndacc2x3 = XNN_UNPREDICTABLE(vrndacc2x3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x3;
- vrndacc3x0 = XNN_UNPREDICTABLE(vrndacc3x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc3x0;
- vrndacc3x1 = XNN_UNPREDICTABLE(vrndacc3x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc3x1;
- vrndacc3x2 = XNN_UNPREDICTABLE(vrndacc3x2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc3x2;
- vrndacc3x3 = XNN_UNPREDICTABLE(vrndacc3x3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc3x3;
-
- const long voutput_max_less_zero_point = params->fp32_scalar_lrint.output_max_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x1;
- vrndacc0x2 = XNN_UNPREDICTABLE(vrndacc0x2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x2;
- vrndacc0x3 = XNN_UNPREDICTABLE(vrndacc0x3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x3;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x1;
- vrndacc1x2 = XNN_UNPREDICTABLE(vrndacc1x2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x2;
- vrndacc1x3 = XNN_UNPREDICTABLE(vrndacc1x3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x3;
- vrndacc2x0 = XNN_UNPREDICTABLE(vrndacc2x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x0;
- vrndacc2x1 = XNN_UNPREDICTABLE(vrndacc2x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x1;
- vrndacc2x2 = XNN_UNPREDICTABLE(vrndacc2x2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x2;
- vrndacc2x3 = XNN_UNPREDICTABLE(vrndacc2x3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x3;
- vrndacc3x0 = XNN_UNPREDICTABLE(vrndacc3x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc3x0;
- vrndacc3x1 = XNN_UNPREDICTABLE(vrndacc3x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc3x1;
- vrndacc3x2 = XNN_UNPREDICTABLE(vrndacc3x2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc3x2;
- vrndacc3x3 = XNN_UNPREDICTABLE(vrndacc3x3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc3x3;
-
- const int32_t voutput_zero_point = params->fp32_scalar_lrint.output_zero_point;
- int32_t vout0x0 = (int32_t) vrndacc0x0 + voutput_zero_point;
- int32_t vout0x1 = (int32_t) vrndacc0x1 + voutput_zero_point;
- int32_t vout0x2 = (int32_t) vrndacc0x2 + voutput_zero_point;
- int32_t vout0x3 = (int32_t) vrndacc0x3 + voutput_zero_point;
- int32_t vout1x0 = (int32_t) vrndacc1x0 + voutput_zero_point;
- int32_t vout1x1 = (int32_t) vrndacc1x1 + voutput_zero_point;
- int32_t vout1x2 = (int32_t) vrndacc1x2 + voutput_zero_point;
- int32_t vout1x3 = (int32_t) vrndacc1x3 + voutput_zero_point;
- int32_t vout2x0 = (int32_t) vrndacc2x0 + voutput_zero_point;
- int32_t vout2x1 = (int32_t) vrndacc2x1 + voutput_zero_point;
- int32_t vout2x2 = (int32_t) vrndacc2x2 + voutput_zero_point;
- int32_t vout2x3 = (int32_t) vrndacc2x3 + voutput_zero_point;
- int32_t vout3x0 = (int32_t) vrndacc3x0 + voutput_zero_point;
- int32_t vout3x1 = (int32_t) vrndacc3x1 + voutput_zero_point;
- int32_t vout3x2 = (int32_t) vrndacc3x2 + voutput_zero_point;
- int32_t vout3x3 = (int32_t) vrndacc3x3 + voutput_zero_point;
-
- if XNN_LIKELY(nc >= 4) {
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
- c0[2] = (int8_t) vout0x2;
- c0[3] = (int8_t) vout0x3;
- c1[0] = (int8_t) vout1x0;
- c1[1] = (int8_t) vout1x1;
- c1[2] = (int8_t) vout1x2;
- c1[3] = (int8_t) vout1x3;
- c2[0] = (int8_t) vout2x0;
- c2[1] = (int8_t) vout2x1;
- c2[2] = (int8_t) vout2x2;
- c2[3] = (int8_t) vout2x3;
- c3[0] = (int8_t) vout3x0;
- c3[1] = (int8_t) vout3x1;
- c3[2] = (int8_t) vout3x2;
- c3[3] = (int8_t) vout3x3;
-
- a0 = (const int8_t*) ((uintptr_t) a0 - kc);
- a1 = (const int8_t*) ((uintptr_t) a1 - kc);
- a2 = (const int8_t*) ((uintptr_t) a2 - kc);
- a3 = (const int8_t*) ((uintptr_t) a3 - kc);
-
- c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
- c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
- c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
- c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
-
- nc -= 4;
- } else {
- if (nc & 2) {
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
- vout0x0 = vout0x2;
- c0 += 2;
- c1[0] = (int8_t) vout1x0;
- c1[1] = (int8_t) vout1x1;
- vout1x0 = vout1x2;
- c1 += 2;
- c2[0] = (int8_t) vout2x0;
- c2[1] = (int8_t) vout2x1;
- vout2x0 = vout2x2;
- c2 += 2;
- c3[0] = (int8_t) vout3x0;
- c3[1] = (int8_t) vout3x1;
- vout3x0 = vout3x2;
- c3 += 2;
- }
- if (nc & 1) {
- c0[0] = (int8_t) vout0x0;
- c1[0] = (int8_t) vout1x0;
- c2[0] = (int8_t) vout2x0;
- c3[0] = (int8_t) vout3x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qs8-gemm/gen/4x4-minmax-fp32-scalar-magic.c b/src/qs8-gemm/gen/4x4-minmax-fp32-scalar-lrintf.c
similarity index 76%
copy from src/qs8-gemm/gen/4x4-minmax-fp32-scalar-magic.c
copy to src/qs8-gemm/gen/4x4-minmax-fp32-scalar-lrintf.c
index f2c3231..9f12f10 100644
--- a/src/qs8-gemm/gen/4x4-minmax-fp32-scalar-magic.c
+++ b/src/qs8-gemm/gen/4x4-minmax-fp32-scalar-lrintf.c
@@ -8,14 +8,13 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-
-#include <fp16.h>
+#include <math.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
-void xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_magic(
+void xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
@@ -122,7 +121,7 @@
float vfpacc3x2 = (float) vacc3x2;
float vfpacc3x3 = (float) vacc3x3;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_lrintf.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc0x2 *= vscale;
@@ -140,7 +139,7 @@
vfpacc3x2 *= vscale;
vfpacc3x3 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
@@ -158,7 +157,7 @@
vfpacc3x2 = math_max_f32(vfpacc3x2, voutput_min_less_zero_point);
vfpacc3x3 = math_max_f32(vfpacc3x3, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
@@ -176,41 +175,40 @@
vfpacc3x2 = math_min_f32(vfpacc3x2, voutput_max_less_zero_point);
vfpacc3x3 = math_min_f32(vfpacc3x3, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
- vfpacc0x0 += vmagic_bias;
- vfpacc0x1 += vmagic_bias;
- vfpacc0x2 += vmagic_bias;
- vfpacc0x3 += vmagic_bias;
- vfpacc1x0 += vmagic_bias;
- vfpacc1x1 += vmagic_bias;
- vfpacc1x2 += vmagic_bias;
- vfpacc1x3 += vmagic_bias;
- vfpacc2x0 += vmagic_bias;
- vfpacc2x1 += vmagic_bias;
- vfpacc2x2 += vmagic_bias;
- vfpacc2x3 += vmagic_bias;
- vfpacc3x0 += vmagic_bias;
- vfpacc3x1 += vmagic_bias;
- vfpacc3x2 += vmagic_bias;
- vfpacc3x3 += vmagic_bias;
+ const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
+ const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
+ const int32_t vrndacc0x2 = (int32_t) lrintf(vfpacc0x2);
+ const int32_t vrndacc0x3 = (int32_t) lrintf(vfpacc0x3);
+ const int32_t vrndacc1x0 = (int32_t) lrintf(vfpacc1x0);
+ const int32_t vrndacc1x1 = (int32_t) lrintf(vfpacc1x1);
+ const int32_t vrndacc1x2 = (int32_t) lrintf(vfpacc1x2);
+ const int32_t vrndacc1x3 = (int32_t) lrintf(vfpacc1x3);
+ const int32_t vrndacc2x0 = (int32_t) lrintf(vfpacc2x0);
+ const int32_t vrndacc2x1 = (int32_t) lrintf(vfpacc2x1);
+ const int32_t vrndacc2x2 = (int32_t) lrintf(vfpacc2x2);
+ const int32_t vrndacc2x3 = (int32_t) lrintf(vfpacc2x3);
+ const int32_t vrndacc3x0 = (int32_t) lrintf(vfpacc3x0);
+ const int32_t vrndacc3x1 = (int32_t) lrintf(vfpacc3x1);
+ const int32_t vrndacc3x2 = (int32_t) lrintf(vfpacc3x2);
+ const int32_t vrndacc3x3 = (int32_t) lrintf(vfpacc3x3);
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
- int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
- int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
- int32_t vout0x2 = (int32_t) fp32_to_bits(vfpacc0x2) - vmagic_bias_less_output_zero_point;
- int32_t vout0x3 = (int32_t) fp32_to_bits(vfpacc0x3) - vmagic_bias_less_output_zero_point;
- int32_t vout1x0 = (int32_t) fp32_to_bits(vfpacc1x0) - vmagic_bias_less_output_zero_point;
- int32_t vout1x1 = (int32_t) fp32_to_bits(vfpacc1x1) - vmagic_bias_less_output_zero_point;
- int32_t vout1x2 = (int32_t) fp32_to_bits(vfpacc1x2) - vmagic_bias_less_output_zero_point;
- int32_t vout1x3 = (int32_t) fp32_to_bits(vfpacc1x3) - vmagic_bias_less_output_zero_point;
- int32_t vout2x0 = (int32_t) fp32_to_bits(vfpacc2x0) - vmagic_bias_less_output_zero_point;
- int32_t vout2x1 = (int32_t) fp32_to_bits(vfpacc2x1) - vmagic_bias_less_output_zero_point;
- int32_t vout2x2 = (int32_t) fp32_to_bits(vfpacc2x2) - vmagic_bias_less_output_zero_point;
- int32_t vout2x3 = (int32_t) fp32_to_bits(vfpacc2x3) - vmagic_bias_less_output_zero_point;
- int32_t vout3x0 = (int32_t) fp32_to_bits(vfpacc3x0) - vmagic_bias_less_output_zero_point;
- int32_t vout3x1 = (int32_t) fp32_to_bits(vfpacc3x1) - vmagic_bias_less_output_zero_point;
- int32_t vout3x2 = (int32_t) fp32_to_bits(vfpacc3x2) - vmagic_bias_less_output_zero_point;
- int32_t vout3x3 = (int32_t) fp32_to_bits(vfpacc3x3) - vmagic_bias_less_output_zero_point;
+ const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
+ int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
+ int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
+ int32_t vout0x2 = vrndacc0x2 + voutput_zero_point;
+ int32_t vout0x3 = vrndacc0x3 + voutput_zero_point;
+ int32_t vout1x0 = vrndacc1x0 + voutput_zero_point;
+ int32_t vout1x1 = vrndacc1x1 + voutput_zero_point;
+ int32_t vout1x2 = vrndacc1x2 + voutput_zero_point;
+ int32_t vout1x3 = vrndacc1x3 + voutput_zero_point;
+ int32_t vout2x0 = vrndacc2x0 + voutput_zero_point;
+ int32_t vout2x1 = vrndacc2x1 + voutput_zero_point;
+ int32_t vout2x2 = vrndacc2x2 + voutput_zero_point;
+ int32_t vout2x3 = vrndacc2x3 + voutput_zero_point;
+ int32_t vout3x0 = vrndacc3x0 + voutput_zero_point;
+ int32_t vout3x1 = vrndacc3x1 + voutput_zero_point;
+ int32_t vout3x2 = vrndacc3x2 + voutput_zero_point;
+ int32_t vout3x3 = vrndacc3x3 + voutput_zero_point;
if XNN_LIKELY(nc >= 4) {
c0[0] = (int8_t) vout0x0;
diff --git a/src/qs8-gemm/gen/4x4-minmax-rndnu-scalar.c b/src/qs8-gemm/gen/4x4-minmax-rndnu-scalar.c
deleted file mode 100644
index 98fa52a..0000000
--- a/src/qs8-gemm/gen/4x4-minmax-rndnu-scalar.c
+++ /dev/null
@@ -1,255 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-gemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qs8_gemm_minmax_rndnu_ukernel_4x4__scalar(
- size_t mr,
- size_t nc,
- size_t kc,
- const int8_t* restrict a,
- size_t a_stride,
- const void* restrict w,
- int8_t* restrict c,
- size_t cm_stride,
- size_t cn_stride,
- const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 4);
- assert(nc != 0);
- assert(kc != 0);
-
- const int8_t* a0 = a;
- int8_t* c0 = c;
- const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
- int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
- if XNN_UNPREDICTABLE(mr < 2) {
- a1 = a0;
- c1 = c0;
- }
- const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
- int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
- if XNN_UNPREDICTABLE(mr <= 2) {
- a2 = a1;
- c2 = c1;
- }
- const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
- int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
- if XNN_UNPREDICTABLE(mr != 4) {
- a3 = a2;
- c3 = c2;
- }
-
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- int32_t vacc0x2 = ((const int32_t*) w)[2];
- int32_t vacc0x3 = ((const int32_t*) w)[3];
- int32_t vacc1x0 = vacc0x0;
- int32_t vacc1x1 = vacc0x1;
- int32_t vacc1x2 = vacc0x2;
- int32_t vacc1x3 = vacc0x3;
- int32_t vacc2x0 = vacc0x0;
- int32_t vacc2x1 = vacc0x1;
- int32_t vacc2x2 = vacc0x2;
- int32_t vacc2x3 = vacc0x3;
- int32_t vacc3x0 = vacc0x0;
- int32_t vacc3x1 = vacc0x1;
- int32_t vacc3x2 = vacc0x2;
- int32_t vacc3x3 = vacc0x3;
- w = (const void*) ((const int32_t*) w + 4);
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) *a0++;
- const int32_t va1 = (int32_t) *a1++;
- const int32_t va2 = (int32_t) *a2++;
- const int32_t va3 = (int32_t) *a3++;
-
- const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
- const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
- const int32_t vb2 = (int32_t) ((const int8_t*) w)[2];
- const int32_t vb3 = (int32_t) ((const int8_t*) w)[3];
- w = (const void*) ((const int8_t*) w + 4);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
- vacc0x2 += va0 * vb2;
- vacc0x3 += va0 * vb3;
- vacc1x0 += va1 * vb0;
- vacc1x1 += va1 * vb1;
- vacc1x2 += va1 * vb2;
- vacc1x3 += va1 * vb3;
- vacc2x0 += va2 * vb0;
- vacc2x1 += va2 * vb1;
- vacc2x2 += va2 * vb2;
- vacc2x3 += va2 * vb3;
- vacc3x0 += va3 * vb0;
- vacc3x1 += va3 * vb1;
- vacc3x2 += va3 * vb2;
- vacc3x3 += va3 * vb3;
-
- k -= sizeof(int8_t);
- } while (k != 0);
-
- const int32_t vmultiplier = params->rndnu_scalar.multiplier;
- const int64_t vproduct0x0 = (int64_t) vacc0x0 * (int64_t) vmultiplier;
- const int64_t vproduct0x1 = (int64_t) vacc0x1 * (int64_t) vmultiplier;
- const int64_t vproduct0x2 = (int64_t) vacc0x2 * (int64_t) vmultiplier;
- const int64_t vproduct0x3 = (int64_t) vacc0x3 * (int64_t) vmultiplier;
- const int64_t vproduct1x0 = (int64_t) vacc1x0 * (int64_t) vmultiplier;
- const int64_t vproduct1x1 = (int64_t) vacc1x1 * (int64_t) vmultiplier;
- const int64_t vproduct1x2 = (int64_t) vacc1x2 * (int64_t) vmultiplier;
- const int64_t vproduct1x3 = (int64_t) vacc1x3 * (int64_t) vmultiplier;
- const int64_t vproduct2x0 = (int64_t) vacc2x0 * (int64_t) vmultiplier;
- const int64_t vproduct2x1 = (int64_t) vacc2x1 * (int64_t) vmultiplier;
- const int64_t vproduct2x2 = (int64_t) vacc2x2 * (int64_t) vmultiplier;
- const int64_t vproduct2x3 = (int64_t) vacc2x3 * (int64_t) vmultiplier;
- const int64_t vproduct3x0 = (int64_t) vacc3x0 * (int64_t) vmultiplier;
- const int64_t vproduct3x1 = (int64_t) vacc3x1 * (int64_t) vmultiplier;
- const int64_t vproduct3x2 = (int64_t) vacc3x2 * (int64_t) vmultiplier;
- const int64_t vproduct3x3 = (int64_t) vacc3x3 * (int64_t) vmultiplier;
-
- const uint32_t shift = params->rndnu_scalar.shift;
- const int64_t rounding = params->rndnu_scalar.rounding;
- int32_t vout0x0 = (int32_t) asr_s64(vproduct0x0 + rounding, shift);
- int32_t vout0x1 = (int32_t) asr_s64(vproduct0x1 + rounding, shift);
- int32_t vout0x2 = (int32_t) asr_s64(vproduct0x2 + rounding, shift);
- int32_t vout0x3 = (int32_t) asr_s64(vproduct0x3 + rounding, shift);
- int32_t vout1x0 = (int32_t) asr_s64(vproduct1x0 + rounding, shift);
- int32_t vout1x1 = (int32_t) asr_s64(vproduct1x1 + rounding, shift);
- int32_t vout1x2 = (int32_t) asr_s64(vproduct1x2 + rounding, shift);
- int32_t vout1x3 = (int32_t) asr_s64(vproduct1x3 + rounding, shift);
- int32_t vout2x0 = (int32_t) asr_s64(vproduct2x0 + rounding, shift);
- int32_t vout2x1 = (int32_t) asr_s64(vproduct2x1 + rounding, shift);
- int32_t vout2x2 = (int32_t) asr_s64(vproduct2x2 + rounding, shift);
- int32_t vout2x3 = (int32_t) asr_s64(vproduct2x3 + rounding, shift);
- int32_t vout3x0 = (int32_t) asr_s64(vproduct3x0 + rounding, shift);
- int32_t vout3x1 = (int32_t) asr_s64(vproduct3x1 + rounding, shift);
- int32_t vout3x2 = (int32_t) asr_s64(vproduct3x2 + rounding, shift);
- int32_t vout3x3 = (int32_t) asr_s64(vproduct3x3 + rounding, shift);
-
- const int32_t voutput_min_less_zero_point = params->rndnu_scalar.output_min_less_zero_point;
- vout0x0 = math_max_s32(vout0x0, voutput_min_less_zero_point);
- vout0x1 = math_max_s32(vout0x1, voutput_min_less_zero_point);
- vout0x2 = math_max_s32(vout0x2, voutput_min_less_zero_point);
- vout0x3 = math_max_s32(vout0x3, voutput_min_less_zero_point);
- vout1x0 = math_max_s32(vout1x0, voutput_min_less_zero_point);
- vout1x1 = math_max_s32(vout1x1, voutput_min_less_zero_point);
- vout1x2 = math_max_s32(vout1x2, voutput_min_less_zero_point);
- vout1x3 = math_max_s32(vout1x3, voutput_min_less_zero_point);
- vout2x0 = math_max_s32(vout2x0, voutput_min_less_zero_point);
- vout2x1 = math_max_s32(vout2x1, voutput_min_less_zero_point);
- vout2x2 = math_max_s32(vout2x2, voutput_min_less_zero_point);
- vout2x3 = math_max_s32(vout2x3, voutput_min_less_zero_point);
- vout3x0 = math_max_s32(vout3x0, voutput_min_less_zero_point);
- vout3x1 = math_max_s32(vout3x1, voutput_min_less_zero_point);
- vout3x2 = math_max_s32(vout3x2, voutput_min_less_zero_point);
- vout3x3 = math_max_s32(vout3x3, voutput_min_less_zero_point);
-
- const int32_t voutput_max_less_zero_point = params->rndnu_scalar.output_max_less_zero_point;
- vout0x0 = math_min_s32(vout0x0, voutput_max_less_zero_point);
- vout0x1 = math_min_s32(vout0x1, voutput_max_less_zero_point);
- vout0x2 = math_min_s32(vout0x2, voutput_max_less_zero_point);
- vout0x3 = math_min_s32(vout0x3, voutput_max_less_zero_point);
- vout1x0 = math_min_s32(vout1x0, voutput_max_less_zero_point);
- vout1x1 = math_min_s32(vout1x1, voutput_max_less_zero_point);
- vout1x2 = math_min_s32(vout1x2, voutput_max_less_zero_point);
- vout1x3 = math_min_s32(vout1x3, voutput_max_less_zero_point);
- vout2x0 = math_min_s32(vout2x0, voutput_max_less_zero_point);
- vout2x1 = math_min_s32(vout2x1, voutput_max_less_zero_point);
- vout2x2 = math_min_s32(vout2x2, voutput_max_less_zero_point);
- vout2x3 = math_min_s32(vout2x3, voutput_max_less_zero_point);
- vout3x0 = math_min_s32(vout3x0, voutput_max_less_zero_point);
- vout3x1 = math_min_s32(vout3x1, voutput_max_less_zero_point);
- vout3x2 = math_min_s32(vout3x2, voutput_max_less_zero_point);
- vout3x3 = math_min_s32(vout3x3, voutput_max_less_zero_point);
-
- const int32_t voutput_zero_point = params->rndnu_scalar.output_zero_point;
- vout0x0 += voutput_zero_point;
- vout0x1 += voutput_zero_point;
- vout0x2 += voutput_zero_point;
- vout0x3 += voutput_zero_point;
- vout1x0 += voutput_zero_point;
- vout1x1 += voutput_zero_point;
- vout1x2 += voutput_zero_point;
- vout1x3 += voutput_zero_point;
- vout2x0 += voutput_zero_point;
- vout2x1 += voutput_zero_point;
- vout2x2 += voutput_zero_point;
- vout2x3 += voutput_zero_point;
- vout3x0 += voutput_zero_point;
- vout3x1 += voutput_zero_point;
- vout3x2 += voutput_zero_point;
- vout3x3 += voutput_zero_point;
-
- if XNN_LIKELY(nc >= 4) {
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
- c0[2] = (int8_t) vout0x2;
- c0[3] = (int8_t) vout0x3;
- c1[0] = (int8_t) vout1x0;
- c1[1] = (int8_t) vout1x1;
- c1[2] = (int8_t) vout1x2;
- c1[3] = (int8_t) vout1x3;
- c2[0] = (int8_t) vout2x0;
- c2[1] = (int8_t) vout2x1;
- c2[2] = (int8_t) vout2x2;
- c2[3] = (int8_t) vout2x3;
- c3[0] = (int8_t) vout3x0;
- c3[1] = (int8_t) vout3x1;
- c3[2] = (int8_t) vout3x2;
- c3[3] = (int8_t) vout3x3;
-
- a0 = (const int8_t*) ((uintptr_t) a0 - kc);
- a1 = (const int8_t*) ((uintptr_t) a1 - kc);
- a2 = (const int8_t*) ((uintptr_t) a2 - kc);
- a3 = (const int8_t*) ((uintptr_t) a3 - kc);
-
- c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
- c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
- c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
- c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
-
- nc -= 4;
- } else {
- if (nc & 2) {
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
- vout0x0 = vout0x2;
- c0 += 2;
- c1[0] = (int8_t) vout1x0;
- c1[1] = (int8_t) vout1x1;
- vout1x0 = vout1x2;
- c1 += 2;
- c2[0] = (int8_t) vout2x0;
- c2[1] = (int8_t) vout2x1;
- vout2x0 = vout2x2;
- c2 += 2;
- c3[0] = (int8_t) vout3x0;
- c3[1] = (int8_t) vout3x1;
- vout3x0 = vout3x2;
- c3 += 2;
- }
- if (nc & 1) {
- c0[0] = (int8_t) vout0x0;
- c1[0] = (int8_t) vout1x0;
- c2[0] = (int8_t) vout2x0;
- c3[0] = (int8_t) vout3x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qs8-gemm/scalar.c.in b/src/qs8-gemm/scalar.c.in
index 0708695..41e6d31 100644
--- a/src/qs8-gemm/scalar.c.in
+++ b/src/qs8-gemm/scalar.c.in
@@ -3,16 +3,15 @@
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
-$assert REQUANTIZATION in ["FP32", "RNDNU"]
+$assert REQUANTIZATION == "FP32"
+$assert VARIANT in ["FMAGIC", "LRINTF"]
$assert DATATYPE in ["QC8", "QS8", "QU8"]
-$assert DATATYPE != "QC8" or REQUANTIZATION == "FP32"
#include <assert.h>
-$if REQUANTIZATION == "FP32":
-  $if VARIANT == "LRINT":
-    #include <math.h>
-  $elif VARIANT == "MAGIC":
-    #include <fp16.h>
+$if VARIANT == "LRINTF":
+  #include <math.h>
+$elif VARIANT == "FMAGIC":
+  #include <fp16.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
@@ -92,95 +91,68 @@
k -= sizeof(${XINT8_T});
} while (k != 0);
- $if REQUANTIZATION == "RNDNU":
- const int32_t vmultiplier = params->${PARAMS_STRUCT}.multiplier;
- $for M in range(MR):
- $for N in range(NR):
- const int64_t vproduct${M}x${N} = (int64_t) vacc${M}x${N} * (int64_t) vmultiplier;
+ $for M in range(MR):
+ $for N in range(NR):
+ float vfpacc${M}x${N} = (float) vacc${M}x${N};
- const uint32_t shift = params->${PARAMS_STRUCT}.shift;
- const int64_t rounding = params->${PARAMS_STRUCT}.rounding;
+ $if DATATYPE == "QC8":
+ $if NR % 4 != 0:
+ typedef XNN_UNALIGNED float unaligned_float;
+ $for N in range(NR):
+ const float vscale${N} = ((const unaligned_float*) w)[${N}];
+ $for M in range(MR):
+ vfpacc${M}x${N} *= vscale${N};
+ $else:
+ $for N in range(NR):
+ const float vscale${N} = ((const float*) w)[${N}];
+ $for M in range(MR):
+ vfpacc${M}x${N} *= vscale${N};
+ w = (const void*) ((const float*) w + ${NR});
+ $else:
+ const float vscale = params->${PARAMS_STRUCT}.scale;
$for M in range(MR):
$for N in range(NR):
- int32_t vout${M}x${N} = (int32_t) asr_s64(vproduct${M}x${N} + rounding, shift);
+ vfpacc${M}x${N} *= vscale;
- const int32_t voutput_min_less_zero_point = params->${PARAMS_STRUCT}.output_min_less_zero_point;
+ $if VARIANT == "FMAGIC":
+ const float voutput_min_less_zero_point = params->${PARAMS_STRUCT}.output_min_less_zero_point;
$for M in range(MR):
$for N in range(NR):
- vout${M}x${N} = math_max_s32(vout${M}x${N}, voutput_min_less_zero_point);
+ vfpacc${M}x${N} = math_max_f32(vfpacc${M}x${N}, voutput_min_less_zero_point);
- const int32_t voutput_max_less_zero_point = params->${PARAMS_STRUCT}.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->${PARAMS_STRUCT}.output_max_less_zero_point;
$for M in range(MR):
$for N in range(NR):
- vout${M}x${N} = math_min_s32(vout${M}x${N}, voutput_max_less_zero_point);
+ vfpacc${M}x${N} = math_min_f32(vfpacc${M}x${N}, voutput_max_less_zero_point);
+
+ const float vmagic_bias = params->${PARAMS_STRUCT}.magic_bias;
+ $for M in range(MR):
+ $for N in range(NR):
+ vfpacc${M}x${N} += vmagic_bias;
+
+ const int32_t vmagic_bias_less_output_zero_point = params->${PARAMS_STRUCT}.magic_bias_less_output_zero_point;
+ $for M in range(MR):
+ $for N in range(NR):
+ int32_t vout${M}x${N} = (int32_t) fp32_to_bits(vfpacc${M}x${N}) - vmagic_bias_less_output_zero_point;
+ $elif VARIANT == "LRINTF":
+ const float voutput_min_less_zero_point = params->${PARAMS_STRUCT}.output_min_less_zero_point;
+ $for M in range(MR):
+ $for N in range(NR):
+ vfpacc${M}x${N} = math_max_f32(vfpacc${M}x${N}, voutput_min_less_zero_point);
+
+ const float voutput_max_less_zero_point = params->${PARAMS_STRUCT}.output_max_less_zero_point;
+ $for M in range(MR):
+ $for N in range(NR):
+ vfpacc${M}x${N} = math_min_f32(vfpacc${M}x${N}, voutput_max_less_zero_point);
+
+ $for M in range(MR):
+ $for N in range(NR):
+ const int32_t vrndacc${M}x${N} = (int32_t) lrintf(vfpacc${M}x${N});
const int32_t voutput_zero_point = params->${PARAMS_STRUCT}.output_zero_point;
$for M in range(MR):
$for N in range(NR):
- vout${M}x${N} += voutput_zero_point;
- $elif REQUANTIZATION == "FP32":
- $for M in range(MR):
- $for N in range(NR):
- float vfpacc${M}x${N} = (float) vacc${M}x${N};
-
- $if DATATYPE == "QC8":
- $if NR % 4 != 0:
- typedef XNN_UNALIGNED float unaligned_float;
- $for N in range(NR):
- const float vscale${N} = ((const unaligned_float*) w)[${N}];
- $for M in range(MR):
- vfpacc${M}x${N} *= vscale${N};
- $else:
- $for N in range(NR):
- const float vscale${N} = ((const float*) w)[${N}];
- $for M in range(MR):
- vfpacc${M}x${N} *= vscale${N};
- w = (const void*) ((const float*) w + ${NR});
- $else:
- const float vscale = params->${PARAMS_STRUCT}.scale;
- $for M in range(MR):
- $for N in range(NR):
- vfpacc${M}x${N} *= vscale;
-
- $if VARIANT == "MAGIC":
- const float voutput_min_less_zero_point = params->${PARAMS_STRUCT}.output_min_less_zero_point;
- $for M in range(MR):
- $for N in range(NR):
- vfpacc${M}x${N} = math_max_f32(vfpacc${M}x${N}, voutput_min_less_zero_point);
-
- const float voutput_max_less_zero_point = params->${PARAMS_STRUCT}.output_max_less_zero_point;
- $for M in range(MR):
- $for N in range(NR):
- vfpacc${M}x${N} = math_min_f32(vfpacc${M}x${N}, voutput_max_less_zero_point);
-
- const float vmagic_bias = params->${PARAMS_STRUCT}.magic_bias;
- $for M in range(MR):
- $for N in range(NR):
- vfpacc${M}x${N} += vmagic_bias;
-
- const int32_t vmagic_bias_less_output_zero_point = params->${PARAMS_STRUCT}.magic_bias_less_output_zero_point;
- $for M in range(MR):
- $for N in range(NR):
- int32_t vout${M}x${N} = (int32_t) fp32_to_bits(vfpacc${M}x${N}) - vmagic_bias_less_output_zero_point;
- $elif VARIANT == "LRINT":
- $for M in range(MR):
- $for N in range(NR):
- long vrndacc${M}x${N} = lrintf(vfpacc${M}x${N});
-
- const long voutput_min_less_zero_point = params->${PARAMS_STRUCT}.output_min_less_zero_point;
- $for M in range(MR):
- $for N in range(NR):
- vrndacc${M}x${N} = XNN_UNPREDICTABLE(vrndacc${M}x${N} < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc${M}x${N};
-
- const long voutput_max_less_zero_point = params->${PARAMS_STRUCT}.output_max_less_zero_point;
- $for M in range(MR):
- $for N in range(NR):
- vrndacc${M}x${N} = XNN_UNPREDICTABLE(vrndacc${M}x${N} > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc${M}x${N};
-
- const int32_t voutput_zero_point = params->${PARAMS_STRUCT}.output_zero_point;
- $for M in range(MR):
- $for N in range(NR):
- int32_t vout${M}x${N} = (int32_t) vrndacc${M}x${N} + voutput_zero_point;
+ int32_t vout${M}x${N} = vrndacc${M}x${N} + voutput_zero_point;
if XNN_LIKELY(nc >= ${NR}) {
$for M in range(MR):
diff --git a/src/qs8-igemm/gen/1x2-minmax-fp32-scalar-magic.c b/src/qs8-igemm/gen/1x2-minmax-fp32-scalar-fmagic.c
similarity index 89%
rename from src/qs8-igemm/gen/1x2-minmax-fp32-scalar-magic.c
rename to src/qs8-igemm/gen/1x2-minmax-fp32-scalar-fmagic.c
index 7317283..805c01f 100644
--- a/src/qs8-igemm/gen/1x2-minmax-fp32-scalar-magic.c
+++ b/src/qs8-igemm/gen/1x2-minmax-fp32-scalar-fmagic.c
@@ -15,7 +15,7 @@
#include <xnnpack/gemm.h>
-void xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_magic(
+void xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic(
size_t mr,
size_t nc,
size_t kc,
@@ -74,23 +74,23 @@
float vfpacc0x0 = (float) vacc0x0;
float vfpacc0x1 = (float) vacc0x1;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_fmagic.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
+ const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
+ const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
diff --git a/src/qs8-igemm/gen/1x2-minmax-fp32-scalar-lrint.c b/src/qs8-igemm/gen/1x2-minmax-fp32-scalar-lrint.c
deleted file mode 100644
index f021ef2..0000000
--- a/src/qs8-igemm/gen/1x2-minmax-fp32-scalar-lrint.c
+++ /dev/null
@@ -1,111 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-igemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-#include <math.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint(
- size_t mr,
- size_t nc,
- size_t kc,
- size_t ks,
- const int8_t**restrict a,
- const void*restrict w,
- int8_t*restrict c,
- size_t cm_stride,
- size_t cn_stride,
- size_t a_offset,
- const int8_t* zero,
- const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 1);
- assert(nc != 0);
- assert(kc != 0);
- assert(ks != 0);
- assert(ks % (1 * sizeof(void*)) == 0);
- assert(a != NULL);
- assert(w != NULL);
- assert(c != NULL);
-
- int8_t* c0 = c;
-
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- w = (const void*) ((const int32_t*) w + 2);
-
- size_t p = ks;
- do {
- const int8_t* restrict a0 = a[0];
- assert(a0 != NULL);
- if XNN_UNPREDICTABLE(a0 != zero) {
- a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
- }
- a += 1;
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) *a0++;
-
- const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
- const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
- w = (const void*) ((const int8_t*) w + 2);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
-
- k -= sizeof(int8_t);
- } while (k != 0);
- p -= 1 * sizeof(void*);
- } while (p != 0);
-
- float vfpacc0x0 = (float) vacc0x0;
- float vfpacc0x1 = (float) vacc0x1;
-
- const float vscale = params->fp32_scalar_lrint.scale;
- vfpacc0x0 *= vscale;
- vfpacc0x1 *= vscale;
-
- long vrndacc0x0 = lrintf(vfpacc0x0);
- long vrndacc0x1 = lrintf(vfpacc0x1);
-
- const long voutput_min_less_zero_point = params->fp32_scalar_lrint.output_min_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x1;
-
- const long voutput_max_less_zero_point = params->fp32_scalar_lrint.output_max_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x1;
-
- const int32_t voutput_zero_point = params->fp32_scalar_lrint.output_zero_point;
- int32_t vout0x0 = (int32_t) vrndacc0x0 + voutput_zero_point;
- int32_t vout0x1 = (int32_t) vrndacc0x1 + voutput_zero_point;
-
- if XNN_LIKELY(nc >= 2) {
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
-
- c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
-
- a = (const int8_t**restrict) ((uintptr_t) a - ks);
- nc -= 2;
- } else {
- if (nc & 1) {
- c0[0] = (int8_t) vout0x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qs8-igemm/gen/1x2-minmax-fp32-scalar-magic.c b/src/qs8-igemm/gen/1x2-minmax-fp32-scalar-lrintf.c
similarity index 79%
copy from src/qs8-igemm/gen/1x2-minmax-fp32-scalar-magic.c
copy to src/qs8-igemm/gen/1x2-minmax-fp32-scalar-lrintf.c
index 7317283..691d0ec 100644
--- a/src/qs8-igemm/gen/1x2-minmax-fp32-scalar-magic.c
+++ b/src/qs8-igemm/gen/1x2-minmax-fp32-scalar-lrintf.c
@@ -8,14 +8,13 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-
-#include <fp16.h>
+#include <math.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
-void xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_magic(
+void xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
@@ -74,25 +73,24 @@
float vfpacc0x0 = (float) vacc0x0;
float vfpacc0x1 = (float) vacc0x1;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_lrintf.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
- vfpacc0x0 += vmagic_bias;
- vfpacc0x1 += vmagic_bias;
+ const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
+ const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
- int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
- int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
+ const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
+ int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
+ int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
if XNN_LIKELY(nc >= 2) {
c0[0] = (int8_t) vout0x0;
diff --git a/src/qs8-igemm/gen/1x2-minmax-rndnu-scalar.c b/src/qs8-igemm/gen/1x2-minmax-rndnu-scalar.c
deleted file mode 100644
index f3140ff..0000000
--- a/src/qs8-igemm/gen/1x2-minmax-rndnu-scalar.c
+++ /dev/null
@@ -1,109 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-igemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qs8_igemm_minmax_rndnu_ukernel_1x2__scalar(
- size_t mr,
- size_t nc,
- size_t kc,
- size_t ks,
- const int8_t**restrict a,
- const void*restrict w,
- int8_t*restrict c,
- size_t cm_stride,
- size_t cn_stride,
- size_t a_offset,
- const int8_t* zero,
- const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 1);
- assert(nc != 0);
- assert(kc != 0);
- assert(ks != 0);
- assert(ks % (1 * sizeof(void*)) == 0);
- assert(a != NULL);
- assert(w != NULL);
- assert(c != NULL);
-
- int8_t* c0 = c;
-
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- w = (const void*) ((const int32_t*) w + 2);
-
- size_t p = ks;
- do {
- const int8_t* restrict a0 = a[0];
- assert(a0 != NULL);
- if XNN_UNPREDICTABLE(a0 != zero) {
- a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
- }
- a += 1;
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) *a0++;
-
- const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
- const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
- w = (const void*) ((const int8_t*) w + 2);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
-
- k -= sizeof(int8_t);
- } while (k != 0);
- p -= 1 * sizeof(void*);
- } while (p != 0);
-
- const int32_t vmultiplier = params->rndnu_scalar.multiplier;
- const int64_t vproduct0x0 = (int64_t) vacc0x0 * (int64_t) vmultiplier;
- const int64_t vproduct0x1 = (int64_t) vacc0x1 * (int64_t) vmultiplier;
-
- const uint32_t shift = params->rndnu_scalar.shift;
- const int64_t rounding = params->rndnu_scalar.rounding;
- int32_t vout0x0 = (int32_t) asr_s64(vproduct0x0 + rounding, shift);
- int32_t vout0x1 = (int32_t) asr_s64(vproduct0x1 + rounding, shift);
-
- const int32_t voutput_min_less_zero_point = params->rndnu_scalar.output_min_less_zero_point;
- vout0x0 = math_max_s32(vout0x0, voutput_min_less_zero_point);
- vout0x1 = math_max_s32(vout0x1, voutput_min_less_zero_point);
-
- const int32_t voutput_max_less_zero_point = params->rndnu_scalar.output_max_less_zero_point;
- vout0x0 = math_min_s32(vout0x0, voutput_max_less_zero_point);
- vout0x1 = math_min_s32(vout0x1, voutput_max_less_zero_point);
-
- const int32_t voutput_zero_point = params->rndnu_scalar.output_zero_point;
- vout0x0 += voutput_zero_point;
- vout0x1 += voutput_zero_point;
-
- if XNN_LIKELY(nc >= 2) {
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
-
- c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
-
- a = (const int8_t**restrict) ((uintptr_t) a - ks);
- nc -= 2;
- } else {
- if (nc & 1) {
- c0[0] = (int8_t) vout0x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qs8-igemm/gen/1x4-minmax-fp32-scalar-magic.c b/src/qs8-igemm/gen/1x4-minmax-fp32-scalar-fmagic.c
similarity index 92%
rename from src/qs8-igemm/gen/1x4-minmax-fp32-scalar-magic.c
rename to src/qs8-igemm/gen/1x4-minmax-fp32-scalar-fmagic.c
index 0d30c1e..aec64ed 100644
--- a/src/qs8-igemm/gen/1x4-minmax-fp32-scalar-magic.c
+++ b/src/qs8-igemm/gen/1x4-minmax-fp32-scalar-fmagic.c
@@ -15,7 +15,7 @@
#include <xnnpack/gemm.h>
-void xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_magic(
+void xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic(
size_t mr,
size_t nc,
size_t kc,
@@ -82,31 +82,31 @@
float vfpacc0x2 = (float) vacc0x2;
float vfpacc0x3 = (float) vacc0x3;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_fmagic.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc0x2 *= vscale;
vfpacc0x3 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
vfpacc0x3 = math_max_f32(vfpacc0x3, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
vfpacc0x3 = math_min_f32(vfpacc0x3, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
+ const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc0x2 += vmagic_bias;
vfpacc0x3 += vmagic_bias;
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
+ const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
int32_t vout0x2 = (int32_t) fp32_to_bits(vfpacc0x2) - vmagic_bias_less_output_zero_point;
diff --git a/src/qs8-igemm/gen/1x4-minmax-fp32-scalar-lrint.c b/src/qs8-igemm/gen/1x4-minmax-fp32-scalar-lrint.c
deleted file mode 100644
index 315d380..0000000
--- a/src/qs8-igemm/gen/1x4-minmax-fp32-scalar-lrint.c
+++ /dev/null
@@ -1,137 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-igemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-#include <math.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint(
- size_t mr,
- size_t nc,
- size_t kc,
- size_t ks,
- const int8_t**restrict a,
- const void*restrict w,
- int8_t*restrict c,
- size_t cm_stride,
- size_t cn_stride,
- size_t a_offset,
- const int8_t* zero,
- const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 1);
- assert(nc != 0);
- assert(kc != 0);
- assert(ks != 0);
- assert(ks % (1 * sizeof(void*)) == 0);
- assert(a != NULL);
- assert(w != NULL);
- assert(c != NULL);
-
- int8_t* c0 = c;
-
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- int32_t vacc0x2 = ((const int32_t*) w)[2];
- int32_t vacc0x3 = ((const int32_t*) w)[3];
- w = (const void*) ((const int32_t*) w + 4);
-
- size_t p = ks;
- do {
- const int8_t* restrict a0 = a[0];
- assert(a0 != NULL);
- if XNN_UNPREDICTABLE(a0 != zero) {
- a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
- }
- a += 1;
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) *a0++;
-
- const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
- const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
- const int32_t vb2 = (int32_t) ((const int8_t*) w)[2];
- const int32_t vb3 = (int32_t) ((const int8_t*) w)[3];
- w = (const void*) ((const int8_t*) w + 4);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
- vacc0x2 += va0 * vb2;
- vacc0x3 += va0 * vb3;
-
- k -= sizeof(int8_t);
- } while (k != 0);
- p -= 1 * sizeof(void*);
- } while (p != 0);
-
- float vfpacc0x0 = (float) vacc0x0;
- float vfpacc0x1 = (float) vacc0x1;
- float vfpacc0x2 = (float) vacc0x2;
- float vfpacc0x3 = (float) vacc0x3;
-
- const float vscale = params->fp32_scalar_lrint.scale;
- vfpacc0x0 *= vscale;
- vfpacc0x1 *= vscale;
- vfpacc0x2 *= vscale;
- vfpacc0x3 *= vscale;
-
- long vrndacc0x0 = lrintf(vfpacc0x0);
- long vrndacc0x1 = lrintf(vfpacc0x1);
- long vrndacc0x2 = lrintf(vfpacc0x2);
- long vrndacc0x3 = lrintf(vfpacc0x3);
-
- const long voutput_min_less_zero_point = params->fp32_scalar_lrint.output_min_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x1;
- vrndacc0x2 = XNN_UNPREDICTABLE(vrndacc0x2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x2;
- vrndacc0x3 = XNN_UNPREDICTABLE(vrndacc0x3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x3;
-
- const long voutput_max_less_zero_point = params->fp32_scalar_lrint.output_max_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x1;
- vrndacc0x2 = XNN_UNPREDICTABLE(vrndacc0x2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x2;
- vrndacc0x3 = XNN_UNPREDICTABLE(vrndacc0x3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x3;
-
- const int32_t voutput_zero_point = params->fp32_scalar_lrint.output_zero_point;
- int32_t vout0x0 = (int32_t) vrndacc0x0 + voutput_zero_point;
- int32_t vout0x1 = (int32_t) vrndacc0x1 + voutput_zero_point;
- int32_t vout0x2 = (int32_t) vrndacc0x2 + voutput_zero_point;
- int32_t vout0x3 = (int32_t) vrndacc0x3 + voutput_zero_point;
-
- if XNN_LIKELY(nc >= 4) {
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
- c0[2] = (int8_t) vout0x2;
- c0[3] = (int8_t) vout0x3;
-
- c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
-
- a = (const int8_t**restrict) ((uintptr_t) a - ks);
- nc -= 4;
- } else {
- if (nc & 2) {
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
- vout0x0 = vout0x2;
- c0 += 2;
- }
- if (nc & 1) {
- c0[0] = (int8_t) vout0x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qs8-igemm/gen/1x4-minmax-fp32-scalar-magic.c b/src/qs8-igemm/gen/1x4-minmax-fp32-scalar-lrintf.c
similarity index 79%
copy from src/qs8-igemm/gen/1x4-minmax-fp32-scalar-magic.c
copy to src/qs8-igemm/gen/1x4-minmax-fp32-scalar-lrintf.c
index 0d30c1e..6450bce 100644
--- a/src/qs8-igemm/gen/1x4-minmax-fp32-scalar-magic.c
+++ b/src/qs8-igemm/gen/1x4-minmax-fp32-scalar-lrintf.c
@@ -8,14 +8,13 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-
-#include <fp16.h>
+#include <math.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
-void xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_magic(
+void xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
@@ -82,35 +81,34 @@
float vfpacc0x2 = (float) vacc0x2;
float vfpacc0x3 = (float) vacc0x3;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_lrintf.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc0x2 *= vscale;
vfpacc0x3 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
vfpacc0x3 = math_max_f32(vfpacc0x3, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
vfpacc0x3 = math_min_f32(vfpacc0x3, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
- vfpacc0x0 += vmagic_bias;
- vfpacc0x1 += vmagic_bias;
- vfpacc0x2 += vmagic_bias;
- vfpacc0x3 += vmagic_bias;
+ const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
+ const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
+ const int32_t vrndacc0x2 = (int32_t) lrintf(vfpacc0x2);
+ const int32_t vrndacc0x3 = (int32_t) lrintf(vfpacc0x3);
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
- int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
- int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
- int32_t vout0x2 = (int32_t) fp32_to_bits(vfpacc0x2) - vmagic_bias_less_output_zero_point;
- int32_t vout0x3 = (int32_t) fp32_to_bits(vfpacc0x3) - vmagic_bias_less_output_zero_point;
+ const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
+ int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
+ int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
+ int32_t vout0x2 = vrndacc0x2 + voutput_zero_point;
+ int32_t vout0x3 = vrndacc0x3 + voutput_zero_point;
if XNN_LIKELY(nc >= 4) {
c0[0] = (int8_t) vout0x0;
diff --git a/src/qs8-igemm/gen/1x4-minmax-rndnu-scalar.c b/src/qs8-igemm/gen/1x4-minmax-rndnu-scalar.c
deleted file mode 100644
index 0849a0b..0000000
--- a/src/qs8-igemm/gen/1x4-minmax-rndnu-scalar.c
+++ /dev/null
@@ -1,133 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-igemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qs8_igemm_minmax_rndnu_ukernel_1x4__scalar(
- size_t mr,
- size_t nc,
- size_t kc,
- size_t ks,
- const int8_t**restrict a,
- const void*restrict w,
- int8_t*restrict c,
- size_t cm_stride,
- size_t cn_stride,
- size_t a_offset,
- const int8_t* zero,
- const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 1);
- assert(nc != 0);
- assert(kc != 0);
- assert(ks != 0);
- assert(ks % (1 * sizeof(void*)) == 0);
- assert(a != NULL);
- assert(w != NULL);
- assert(c != NULL);
-
- int8_t* c0 = c;
-
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- int32_t vacc0x2 = ((const int32_t*) w)[2];
- int32_t vacc0x3 = ((const int32_t*) w)[3];
- w = (const void*) ((const int32_t*) w + 4);
-
- size_t p = ks;
- do {
- const int8_t* restrict a0 = a[0];
- assert(a0 != NULL);
- if XNN_UNPREDICTABLE(a0 != zero) {
- a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
- }
- a += 1;
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) *a0++;
-
- const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
- const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
- const int32_t vb2 = (int32_t) ((const int8_t*) w)[2];
- const int32_t vb3 = (int32_t) ((const int8_t*) w)[3];
- w = (const void*) ((const int8_t*) w + 4);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
- vacc0x2 += va0 * vb2;
- vacc0x3 += va0 * vb3;
-
- k -= sizeof(int8_t);
- } while (k != 0);
- p -= 1 * sizeof(void*);
- } while (p != 0);
-
- const int32_t vmultiplier = params->rndnu_scalar.multiplier;
- const int64_t vproduct0x0 = (int64_t) vacc0x0 * (int64_t) vmultiplier;
- const int64_t vproduct0x1 = (int64_t) vacc0x1 * (int64_t) vmultiplier;
- const int64_t vproduct0x2 = (int64_t) vacc0x2 * (int64_t) vmultiplier;
- const int64_t vproduct0x3 = (int64_t) vacc0x3 * (int64_t) vmultiplier;
-
- const uint32_t shift = params->rndnu_scalar.shift;
- const int64_t rounding = params->rndnu_scalar.rounding;
- int32_t vout0x0 = (int32_t) asr_s64(vproduct0x0 + rounding, shift);
- int32_t vout0x1 = (int32_t) asr_s64(vproduct0x1 + rounding, shift);
- int32_t vout0x2 = (int32_t) asr_s64(vproduct0x2 + rounding, shift);
- int32_t vout0x3 = (int32_t) asr_s64(vproduct0x3 + rounding, shift);
-
- const int32_t voutput_min_less_zero_point = params->rndnu_scalar.output_min_less_zero_point;
- vout0x0 = math_max_s32(vout0x0, voutput_min_less_zero_point);
- vout0x1 = math_max_s32(vout0x1, voutput_min_less_zero_point);
- vout0x2 = math_max_s32(vout0x2, voutput_min_less_zero_point);
- vout0x3 = math_max_s32(vout0x3, voutput_min_less_zero_point);
-
- const int32_t voutput_max_less_zero_point = params->rndnu_scalar.output_max_less_zero_point;
- vout0x0 = math_min_s32(vout0x0, voutput_max_less_zero_point);
- vout0x1 = math_min_s32(vout0x1, voutput_max_less_zero_point);
- vout0x2 = math_min_s32(vout0x2, voutput_max_less_zero_point);
- vout0x3 = math_min_s32(vout0x3, voutput_max_less_zero_point);
-
- const int32_t voutput_zero_point = params->rndnu_scalar.output_zero_point;
- vout0x0 += voutput_zero_point;
- vout0x1 += voutput_zero_point;
- vout0x2 += voutput_zero_point;
- vout0x3 += voutput_zero_point;
-
- if XNN_LIKELY(nc >= 4) {
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
- c0[2] = (int8_t) vout0x2;
- c0[3] = (int8_t) vout0x3;
-
- c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
-
- a = (const int8_t**restrict) ((uintptr_t) a - ks);
- nc -= 4;
- } else {
- if (nc & 2) {
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
- vout0x0 = vout0x2;
- c0 += 2;
- }
- if (nc & 1) {
- c0[0] = (int8_t) vout0x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qs8-igemm/gen/2x2-minmax-fp32-scalar-magic.c b/src/qs8-igemm/gen/2x2-minmax-fp32-scalar-fmagic.c
similarity index 92%
rename from src/qs8-igemm/gen/2x2-minmax-fp32-scalar-magic.c
rename to src/qs8-igemm/gen/2x2-minmax-fp32-scalar-fmagic.c
index 85299e0..1788331 100644
--- a/src/qs8-igemm/gen/2x2-minmax-fp32-scalar-magic.c
+++ b/src/qs8-igemm/gen/2x2-minmax-fp32-scalar-fmagic.c
@@ -15,7 +15,7 @@
#include <xnnpack/gemm.h>
-void xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_magic(
+void xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic(
size_t mr,
size_t nc,
size_t kc,
@@ -90,31 +90,31 @@
float vfpacc1x0 = (float) vacc1x0;
float vfpacc1x1 = (float) vacc1x1;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_fmagic.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc1x0 *= vscale;
vfpacc1x1 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point);
vfpacc1x1 = math_max_f32(vfpacc1x1, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point);
vfpacc1x1 = math_min_f32(vfpacc1x1, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
+ const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc1x0 += vmagic_bias;
vfpacc1x1 += vmagic_bias;
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
+ const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
int32_t vout1x0 = (int32_t) fp32_to_bits(vfpacc1x0) - vmagic_bias_less_output_zero_point;
diff --git a/src/qs8-igemm/gen/2x2-minmax-fp32-scalar-lrint.c b/src/qs8-igemm/gen/2x2-minmax-fp32-scalar-lrint.c
deleted file mode 100644
index 72bb9a4..0000000
--- a/src/qs8-igemm/gen/2x2-minmax-fp32-scalar-lrint.c
+++ /dev/null
@@ -1,141 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-igemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-#include <math.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint(
- size_t mr,
- size_t nc,
- size_t kc,
- size_t ks,
- const int8_t**restrict a,
- const void*restrict w,
- int8_t*restrict c,
- size_t cm_stride,
- size_t cn_stride,
- size_t a_offset,
- const int8_t* zero,
- const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 2);
- assert(nc != 0);
- assert(kc != 0);
- assert(ks != 0);
- assert(ks % (2 * sizeof(void*)) == 0);
- assert(a != NULL);
- assert(w != NULL);
- assert(c != NULL);
-
- int8_t* c0 = c;
- int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
- if XNN_UNPREDICTABLE(mr != 2) {
- c1 = c0;
- }
-
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- int32_t vacc1x0 = vacc0x0;
- int32_t vacc1x1 = vacc0x1;
- w = (const void*) ((const int32_t*) w + 2);
-
- size_t p = ks;
- do {
- const int8_t* restrict a0 = a[0];
- assert(a0 != NULL);
- if XNN_UNPREDICTABLE(a0 != zero) {
- a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
- }
- const int8_t* restrict a1 = a[1];
- assert(a1 != NULL);
- if XNN_UNPREDICTABLE(a1 != zero) {
- a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
- }
- a += 2;
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) *a0++;
- const int32_t va1 = (int32_t) *a1++;
-
- const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
- const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
- w = (const void*) ((const int8_t*) w + 2);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
- vacc1x0 += va1 * vb0;
- vacc1x1 += va1 * vb1;
-
- k -= sizeof(int8_t);
- } while (k != 0);
- p -= 2 * sizeof(void*);
- } while (p != 0);
-
- float vfpacc0x0 = (float) vacc0x0;
- float vfpacc0x1 = (float) vacc0x1;
- float vfpacc1x0 = (float) vacc1x0;
- float vfpacc1x1 = (float) vacc1x1;
-
- const float vscale = params->fp32_scalar_lrint.scale;
- vfpacc0x0 *= vscale;
- vfpacc0x1 *= vscale;
- vfpacc1x0 *= vscale;
- vfpacc1x1 *= vscale;
-
- long vrndacc0x0 = lrintf(vfpacc0x0);
- long vrndacc0x1 = lrintf(vfpacc0x1);
- long vrndacc1x0 = lrintf(vfpacc1x0);
- long vrndacc1x1 = lrintf(vfpacc1x1);
-
- const long voutput_min_less_zero_point = params->fp32_scalar_lrint.output_min_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x1;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x1;
-
- const long voutput_max_less_zero_point = params->fp32_scalar_lrint.output_max_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x1;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x1;
-
- const int32_t voutput_zero_point = params->fp32_scalar_lrint.output_zero_point;
- int32_t vout0x0 = (int32_t) vrndacc0x0 + voutput_zero_point;
- int32_t vout0x1 = (int32_t) vrndacc0x1 + voutput_zero_point;
- int32_t vout1x0 = (int32_t) vrndacc1x0 + voutput_zero_point;
- int32_t vout1x1 = (int32_t) vrndacc1x1 + voutput_zero_point;
-
- if XNN_LIKELY(nc >= 2) {
- c1[0] = (int8_t) vout1x0;
- c1[1] = (int8_t) vout1x1;
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
-
- c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
- c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
-
- a = (const int8_t**restrict) ((uintptr_t) a - ks);
- nc -= 2;
- } else {
- if (nc & 1) {
- c1[0] = (int8_t) vout1x0;
- c0[0] = (int8_t) vout0x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qs8-igemm/gen/2x2-minmax-fp32-scalar-magic.c b/src/qs8-igemm/gen/2x2-minmax-fp32-scalar-lrintf.c
similarity index 79%
copy from src/qs8-igemm/gen/2x2-minmax-fp32-scalar-magic.c
copy to src/qs8-igemm/gen/2x2-minmax-fp32-scalar-lrintf.c
index 85299e0..80877b2 100644
--- a/src/qs8-igemm/gen/2x2-minmax-fp32-scalar-magic.c
+++ b/src/qs8-igemm/gen/2x2-minmax-fp32-scalar-lrintf.c
@@ -8,14 +8,13 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-
-#include <fp16.h>
+#include <math.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
-void xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_magic(
+void xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
@@ -90,35 +89,34 @@
float vfpacc1x0 = (float) vacc1x0;
float vfpacc1x1 = (float) vacc1x1;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_lrintf.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc1x0 *= vscale;
vfpacc1x1 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point);
vfpacc1x1 = math_max_f32(vfpacc1x1, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point);
vfpacc1x1 = math_min_f32(vfpacc1x1, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
- vfpacc0x0 += vmagic_bias;
- vfpacc0x1 += vmagic_bias;
- vfpacc1x0 += vmagic_bias;
- vfpacc1x1 += vmagic_bias;
+ const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
+ const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
+ const int32_t vrndacc1x0 = (int32_t) lrintf(vfpacc1x0);
+ const int32_t vrndacc1x1 = (int32_t) lrintf(vfpacc1x1);
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
- int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
- int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
- int32_t vout1x0 = (int32_t) fp32_to_bits(vfpacc1x0) - vmagic_bias_less_output_zero_point;
- int32_t vout1x1 = (int32_t) fp32_to_bits(vfpacc1x1) - vmagic_bias_less_output_zero_point;
+ const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
+ int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
+ int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
+ int32_t vout1x0 = vrndacc1x0 + voutput_zero_point;
+ int32_t vout1x1 = vrndacc1x1 + voutput_zero_point;
if XNN_LIKELY(nc >= 2) {
c1[0] = (int8_t) vout1x0;
diff --git a/src/qs8-igemm/gen/2x2-minmax-rndnu-scalar.c b/src/qs8-igemm/gen/2x2-minmax-rndnu-scalar.c
deleted file mode 100644
index f738d50..0000000
--- a/src/qs8-igemm/gen/2x2-minmax-rndnu-scalar.c
+++ /dev/null
@@ -1,137 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-igemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qs8_igemm_minmax_rndnu_ukernel_2x2__scalar(
- size_t mr,
- size_t nc,
- size_t kc,
- size_t ks,
- const int8_t**restrict a,
- const void*restrict w,
- int8_t*restrict c,
- size_t cm_stride,
- size_t cn_stride,
- size_t a_offset,
- const int8_t* zero,
- const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 2);
- assert(nc != 0);
- assert(kc != 0);
- assert(ks != 0);
- assert(ks % (2 * sizeof(void*)) == 0);
- assert(a != NULL);
- assert(w != NULL);
- assert(c != NULL);
-
- int8_t* c0 = c;
- int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
- if XNN_UNPREDICTABLE(mr != 2) {
- c1 = c0;
- }
-
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- int32_t vacc1x0 = vacc0x0;
- int32_t vacc1x1 = vacc0x1;
- w = (const void*) ((const int32_t*) w + 2);
-
- size_t p = ks;
- do {
- const int8_t* restrict a0 = a[0];
- assert(a0 != NULL);
- if XNN_UNPREDICTABLE(a0 != zero) {
- a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
- }
- const int8_t* restrict a1 = a[1];
- assert(a1 != NULL);
- if XNN_UNPREDICTABLE(a1 != zero) {
- a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
- }
- a += 2;
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) *a0++;
- const int32_t va1 = (int32_t) *a1++;
-
- const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
- const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
- w = (const void*) ((const int8_t*) w + 2);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
- vacc1x0 += va1 * vb0;
- vacc1x1 += va1 * vb1;
-
- k -= sizeof(int8_t);
- } while (k != 0);
- p -= 2 * sizeof(void*);
- } while (p != 0);
-
- const int32_t vmultiplier = params->rndnu_scalar.multiplier;
- const int64_t vproduct0x0 = (int64_t) vacc0x0 * (int64_t) vmultiplier;
- const int64_t vproduct0x1 = (int64_t) vacc0x1 * (int64_t) vmultiplier;
- const int64_t vproduct1x0 = (int64_t) vacc1x0 * (int64_t) vmultiplier;
- const int64_t vproduct1x1 = (int64_t) vacc1x1 * (int64_t) vmultiplier;
-
- const uint32_t shift = params->rndnu_scalar.shift;
- const int64_t rounding = params->rndnu_scalar.rounding;
- int32_t vout0x0 = (int32_t) asr_s64(vproduct0x0 + rounding, shift);
- int32_t vout0x1 = (int32_t) asr_s64(vproduct0x1 + rounding, shift);
- int32_t vout1x0 = (int32_t) asr_s64(vproduct1x0 + rounding, shift);
- int32_t vout1x1 = (int32_t) asr_s64(vproduct1x1 + rounding, shift);
-
- const int32_t voutput_min_less_zero_point = params->rndnu_scalar.output_min_less_zero_point;
- vout0x0 = math_max_s32(vout0x0, voutput_min_less_zero_point);
- vout0x1 = math_max_s32(vout0x1, voutput_min_less_zero_point);
- vout1x0 = math_max_s32(vout1x0, voutput_min_less_zero_point);
- vout1x1 = math_max_s32(vout1x1, voutput_min_less_zero_point);
-
- const int32_t voutput_max_less_zero_point = params->rndnu_scalar.output_max_less_zero_point;
- vout0x0 = math_min_s32(vout0x0, voutput_max_less_zero_point);
- vout0x1 = math_min_s32(vout0x1, voutput_max_less_zero_point);
- vout1x0 = math_min_s32(vout1x0, voutput_max_less_zero_point);
- vout1x1 = math_min_s32(vout1x1, voutput_max_less_zero_point);
-
- const int32_t voutput_zero_point = params->rndnu_scalar.output_zero_point;
- vout0x0 += voutput_zero_point;
- vout0x1 += voutput_zero_point;
- vout1x0 += voutput_zero_point;
- vout1x1 += voutput_zero_point;
-
- if XNN_LIKELY(nc >= 2) {
- c1[0] = (int8_t) vout1x0;
- c1[1] = (int8_t) vout1x1;
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
-
- c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
- c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
-
- a = (const int8_t**restrict) ((uintptr_t) a - ks);
- nc -= 2;
- } else {
- if (nc & 1) {
- c1[0] = (int8_t) vout1x0;
- c0[0] = (int8_t) vout0x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qs8-igemm/gen/2x4-minmax-fp32-scalar-magic.c b/src/qs8-igemm/gen/2x4-minmax-fp32-scalar-fmagic.c
similarity index 94%
rename from src/qs8-igemm/gen/2x4-minmax-fp32-scalar-magic.c
rename to src/qs8-igemm/gen/2x4-minmax-fp32-scalar-fmagic.c
index af7302f..5272f1e 100644
--- a/src/qs8-igemm/gen/2x4-minmax-fp32-scalar-magic.c
+++ b/src/qs8-igemm/gen/2x4-minmax-fp32-scalar-fmagic.c
@@ -15,7 +15,7 @@
#include <xnnpack/gemm.h>
-void xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_magic(
+void xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic(
size_t mr,
size_t nc,
size_t kc,
@@ -104,7 +104,7 @@
float vfpacc1x2 = (float) vacc1x2;
float vfpacc1x3 = (float) vacc1x3;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_fmagic.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc0x2 *= vscale;
@@ -114,7 +114,7 @@
vfpacc1x2 *= vscale;
vfpacc1x3 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
@@ -124,7 +124,7 @@
vfpacc1x2 = math_max_f32(vfpacc1x2, voutput_min_less_zero_point);
vfpacc1x3 = math_max_f32(vfpacc1x3, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
@@ -134,7 +134,7 @@
vfpacc1x2 = math_min_f32(vfpacc1x2, voutput_max_less_zero_point);
vfpacc1x3 = math_min_f32(vfpacc1x3, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
+ const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc0x2 += vmagic_bias;
@@ -144,7 +144,7 @@
vfpacc1x2 += vmagic_bias;
vfpacc1x3 += vmagic_bias;
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
+ const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
int32_t vout0x2 = (int32_t) fp32_to_bits(vfpacc0x2) - vmagic_bias_less_output_zero_point;
diff --git a/src/qs8-igemm/gen/2x4-minmax-fp32-scalar-lrint.c b/src/qs8-igemm/gen/2x4-minmax-fp32-scalar-lrint.c
deleted file mode 100644
index df83dd5..0000000
--- a/src/qs8-igemm/gen/2x4-minmax-fp32-scalar-lrint.c
+++ /dev/null
@@ -1,189 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-igemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-#include <math.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint(
- size_t mr,
- size_t nc,
- size_t kc,
- size_t ks,
- const int8_t**restrict a,
- const void*restrict w,
- int8_t*restrict c,
- size_t cm_stride,
- size_t cn_stride,
- size_t a_offset,
- const int8_t* zero,
- const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 2);
- assert(nc != 0);
- assert(kc != 0);
- assert(ks != 0);
- assert(ks % (2 * sizeof(void*)) == 0);
- assert(a != NULL);
- assert(w != NULL);
- assert(c != NULL);
-
- int8_t* c0 = c;
- int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
- if XNN_UNPREDICTABLE(mr != 2) {
- c1 = c0;
- }
-
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- int32_t vacc0x2 = ((const int32_t*) w)[2];
- int32_t vacc0x3 = ((const int32_t*) w)[3];
- int32_t vacc1x0 = vacc0x0;
- int32_t vacc1x1 = vacc0x1;
- int32_t vacc1x2 = vacc0x2;
- int32_t vacc1x3 = vacc0x3;
- w = (const void*) ((const int32_t*) w + 4);
-
- size_t p = ks;
- do {
- const int8_t* restrict a0 = a[0];
- assert(a0 != NULL);
- if XNN_UNPREDICTABLE(a0 != zero) {
- a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
- }
- const int8_t* restrict a1 = a[1];
- assert(a1 != NULL);
- if XNN_UNPREDICTABLE(a1 != zero) {
- a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
- }
- a += 2;
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) *a0++;
- const int32_t va1 = (int32_t) *a1++;
-
- const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
- const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
- const int32_t vb2 = (int32_t) ((const int8_t*) w)[2];
- const int32_t vb3 = (int32_t) ((const int8_t*) w)[3];
- w = (const void*) ((const int8_t*) w + 4);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
- vacc0x2 += va0 * vb2;
- vacc0x3 += va0 * vb3;
- vacc1x0 += va1 * vb0;
- vacc1x1 += va1 * vb1;
- vacc1x2 += va1 * vb2;
- vacc1x3 += va1 * vb3;
-
- k -= sizeof(int8_t);
- } while (k != 0);
- p -= 2 * sizeof(void*);
- } while (p != 0);
-
- float vfpacc0x0 = (float) vacc0x0;
- float vfpacc0x1 = (float) vacc0x1;
- float vfpacc0x2 = (float) vacc0x2;
- float vfpacc0x3 = (float) vacc0x3;
- float vfpacc1x0 = (float) vacc1x0;
- float vfpacc1x1 = (float) vacc1x1;
- float vfpacc1x2 = (float) vacc1x2;
- float vfpacc1x3 = (float) vacc1x3;
-
- const float vscale = params->fp32_scalar_lrint.scale;
- vfpacc0x0 *= vscale;
- vfpacc0x1 *= vscale;
- vfpacc0x2 *= vscale;
- vfpacc0x3 *= vscale;
- vfpacc1x0 *= vscale;
- vfpacc1x1 *= vscale;
- vfpacc1x2 *= vscale;
- vfpacc1x3 *= vscale;
-
- long vrndacc0x0 = lrintf(vfpacc0x0);
- long vrndacc0x1 = lrintf(vfpacc0x1);
- long vrndacc0x2 = lrintf(vfpacc0x2);
- long vrndacc0x3 = lrintf(vfpacc0x3);
- long vrndacc1x0 = lrintf(vfpacc1x0);
- long vrndacc1x1 = lrintf(vfpacc1x1);
- long vrndacc1x2 = lrintf(vfpacc1x2);
- long vrndacc1x3 = lrintf(vfpacc1x3);
-
- const long voutput_min_less_zero_point = params->fp32_scalar_lrint.output_min_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x1;
- vrndacc0x2 = XNN_UNPREDICTABLE(vrndacc0x2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x2;
- vrndacc0x3 = XNN_UNPREDICTABLE(vrndacc0x3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x3;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x1;
- vrndacc1x2 = XNN_UNPREDICTABLE(vrndacc1x2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x2;
- vrndacc1x3 = XNN_UNPREDICTABLE(vrndacc1x3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x3;
-
- const long voutput_max_less_zero_point = params->fp32_scalar_lrint.output_max_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x1;
- vrndacc0x2 = XNN_UNPREDICTABLE(vrndacc0x2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x2;
- vrndacc0x3 = XNN_UNPREDICTABLE(vrndacc0x3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x3;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x1;
- vrndacc1x2 = XNN_UNPREDICTABLE(vrndacc1x2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x2;
- vrndacc1x3 = XNN_UNPREDICTABLE(vrndacc1x3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x3;
-
- const int32_t voutput_zero_point = params->fp32_scalar_lrint.output_zero_point;
- int32_t vout0x0 = (int32_t) vrndacc0x0 + voutput_zero_point;
- int32_t vout0x1 = (int32_t) vrndacc0x1 + voutput_zero_point;
- int32_t vout0x2 = (int32_t) vrndacc0x2 + voutput_zero_point;
- int32_t vout0x3 = (int32_t) vrndacc0x3 + voutput_zero_point;
- int32_t vout1x0 = (int32_t) vrndacc1x0 + voutput_zero_point;
- int32_t vout1x1 = (int32_t) vrndacc1x1 + voutput_zero_point;
- int32_t vout1x2 = (int32_t) vrndacc1x2 + voutput_zero_point;
- int32_t vout1x3 = (int32_t) vrndacc1x3 + voutput_zero_point;
-
- if XNN_LIKELY(nc >= 4) {
- c1[0] = (int8_t) vout1x0;
- c1[1] = (int8_t) vout1x1;
- c1[2] = (int8_t) vout1x2;
- c1[3] = (int8_t) vout1x3;
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
- c0[2] = (int8_t) vout0x2;
- c0[3] = (int8_t) vout0x3;
-
- c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
- c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
-
- a = (const int8_t**restrict) ((uintptr_t) a - ks);
- nc -= 4;
- } else {
- if (nc & 2) {
- c1[0] = (int8_t) vout1x0;
- c1[1] = (int8_t) vout1x1;
- vout1x0 = vout1x2;
- c1 += 2;
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
- vout0x0 = vout0x2;
- c0 += 2;
- }
- if (nc & 1) {
- c1[0] = (int8_t) vout1x0;
- c0[0] = (int8_t) vout0x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qs8-igemm/gen/2x4-minmax-fp32-scalar-magic.c b/src/qs8-igemm/gen/2x4-minmax-fp32-scalar-lrintf.c
similarity index 78%
copy from src/qs8-igemm/gen/2x4-minmax-fp32-scalar-magic.c
copy to src/qs8-igemm/gen/2x4-minmax-fp32-scalar-lrintf.c
index af7302f..dad2dbe 100644
--- a/src/qs8-igemm/gen/2x4-minmax-fp32-scalar-magic.c
+++ b/src/qs8-igemm/gen/2x4-minmax-fp32-scalar-lrintf.c
@@ -8,14 +8,13 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-
-#include <fp16.h>
+#include <math.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
-void xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_magic(
+void xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
@@ -104,7 +103,7 @@
float vfpacc1x2 = (float) vacc1x2;
float vfpacc1x3 = (float) vacc1x3;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_lrintf.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc0x2 *= vscale;
@@ -114,7 +113,7 @@
vfpacc1x2 *= vscale;
vfpacc1x3 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
@@ -124,7 +123,7 @@
vfpacc1x2 = math_max_f32(vfpacc1x2, voutput_min_less_zero_point);
vfpacc1x3 = math_max_f32(vfpacc1x3, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
@@ -134,25 +133,24 @@
vfpacc1x2 = math_min_f32(vfpacc1x2, voutput_max_less_zero_point);
vfpacc1x3 = math_min_f32(vfpacc1x3, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
- vfpacc0x0 += vmagic_bias;
- vfpacc0x1 += vmagic_bias;
- vfpacc0x2 += vmagic_bias;
- vfpacc0x3 += vmagic_bias;
- vfpacc1x0 += vmagic_bias;
- vfpacc1x1 += vmagic_bias;
- vfpacc1x2 += vmagic_bias;
- vfpacc1x3 += vmagic_bias;
+ const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
+ const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
+ const int32_t vrndacc0x2 = (int32_t) lrintf(vfpacc0x2);
+ const int32_t vrndacc0x3 = (int32_t) lrintf(vfpacc0x3);
+ const int32_t vrndacc1x0 = (int32_t) lrintf(vfpacc1x0);
+ const int32_t vrndacc1x1 = (int32_t) lrintf(vfpacc1x1);
+ const int32_t vrndacc1x2 = (int32_t) lrintf(vfpacc1x2);
+ const int32_t vrndacc1x3 = (int32_t) lrintf(vfpacc1x3);
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
- int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
- int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
- int32_t vout0x2 = (int32_t) fp32_to_bits(vfpacc0x2) - vmagic_bias_less_output_zero_point;
- int32_t vout0x3 = (int32_t) fp32_to_bits(vfpacc0x3) - vmagic_bias_less_output_zero_point;
- int32_t vout1x0 = (int32_t) fp32_to_bits(vfpacc1x0) - vmagic_bias_less_output_zero_point;
- int32_t vout1x1 = (int32_t) fp32_to_bits(vfpacc1x1) - vmagic_bias_less_output_zero_point;
- int32_t vout1x2 = (int32_t) fp32_to_bits(vfpacc1x2) - vmagic_bias_less_output_zero_point;
- int32_t vout1x3 = (int32_t) fp32_to_bits(vfpacc1x3) - vmagic_bias_less_output_zero_point;
+ const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
+ int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
+ int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
+ int32_t vout0x2 = vrndacc0x2 + voutput_zero_point;
+ int32_t vout0x3 = vrndacc0x3 + voutput_zero_point;
+ int32_t vout1x0 = vrndacc1x0 + voutput_zero_point;
+ int32_t vout1x1 = vrndacc1x1 + voutput_zero_point;
+ int32_t vout1x2 = vrndacc1x2 + voutput_zero_point;
+ int32_t vout1x3 = vrndacc1x3 + voutput_zero_point;
if XNN_LIKELY(nc >= 4) {
c1[0] = (int8_t) vout1x0;
diff --git a/src/qs8-igemm/gen/2x4-minmax-rndnu-scalar.c b/src/qs8-igemm/gen/2x4-minmax-rndnu-scalar.c
deleted file mode 100644
index 57524d0..0000000
--- a/src/qs8-igemm/gen/2x4-minmax-rndnu-scalar.c
+++ /dev/null
@@ -1,181 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-igemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qs8_igemm_minmax_rndnu_ukernel_2x4__scalar(
- size_t mr,
- size_t nc,
- size_t kc,
- size_t ks,
- const int8_t**restrict a,
- const void*restrict w,
- int8_t*restrict c,
- size_t cm_stride,
- size_t cn_stride,
- size_t a_offset,
- const int8_t* zero,
- const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 2);
- assert(nc != 0);
- assert(kc != 0);
- assert(ks != 0);
- assert(ks % (2 * sizeof(void*)) == 0);
- assert(a != NULL);
- assert(w != NULL);
- assert(c != NULL);
-
- int8_t* c0 = c;
- int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
- if XNN_UNPREDICTABLE(mr != 2) {
- c1 = c0;
- }
-
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- int32_t vacc0x2 = ((const int32_t*) w)[2];
- int32_t vacc0x3 = ((const int32_t*) w)[3];
- int32_t vacc1x0 = vacc0x0;
- int32_t vacc1x1 = vacc0x1;
- int32_t vacc1x2 = vacc0x2;
- int32_t vacc1x3 = vacc0x3;
- w = (const void*) ((const int32_t*) w + 4);
-
- size_t p = ks;
- do {
- const int8_t* restrict a0 = a[0];
- assert(a0 != NULL);
- if XNN_UNPREDICTABLE(a0 != zero) {
- a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
- }
- const int8_t* restrict a1 = a[1];
- assert(a1 != NULL);
- if XNN_UNPREDICTABLE(a1 != zero) {
- a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
- }
- a += 2;
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) *a0++;
- const int32_t va1 = (int32_t) *a1++;
-
- const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
- const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
- const int32_t vb2 = (int32_t) ((const int8_t*) w)[2];
- const int32_t vb3 = (int32_t) ((const int8_t*) w)[3];
- w = (const void*) ((const int8_t*) w + 4);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
- vacc0x2 += va0 * vb2;
- vacc0x3 += va0 * vb3;
- vacc1x0 += va1 * vb0;
- vacc1x1 += va1 * vb1;
- vacc1x2 += va1 * vb2;
- vacc1x3 += va1 * vb3;
-
- k -= sizeof(int8_t);
- } while (k != 0);
- p -= 2 * sizeof(void*);
- } while (p != 0);
-
- const int32_t vmultiplier = params->rndnu_scalar.multiplier;
- const int64_t vproduct0x0 = (int64_t) vacc0x0 * (int64_t) vmultiplier;
- const int64_t vproduct0x1 = (int64_t) vacc0x1 * (int64_t) vmultiplier;
- const int64_t vproduct0x2 = (int64_t) vacc0x2 * (int64_t) vmultiplier;
- const int64_t vproduct0x3 = (int64_t) vacc0x3 * (int64_t) vmultiplier;
- const int64_t vproduct1x0 = (int64_t) vacc1x0 * (int64_t) vmultiplier;
- const int64_t vproduct1x1 = (int64_t) vacc1x1 * (int64_t) vmultiplier;
- const int64_t vproduct1x2 = (int64_t) vacc1x2 * (int64_t) vmultiplier;
- const int64_t vproduct1x3 = (int64_t) vacc1x3 * (int64_t) vmultiplier;
-
- const uint32_t shift = params->rndnu_scalar.shift;
- const int64_t rounding = params->rndnu_scalar.rounding;
- int32_t vout0x0 = (int32_t) asr_s64(vproduct0x0 + rounding, shift);
- int32_t vout0x1 = (int32_t) asr_s64(vproduct0x1 + rounding, shift);
- int32_t vout0x2 = (int32_t) asr_s64(vproduct0x2 + rounding, shift);
- int32_t vout0x3 = (int32_t) asr_s64(vproduct0x3 + rounding, shift);
- int32_t vout1x0 = (int32_t) asr_s64(vproduct1x0 + rounding, shift);
- int32_t vout1x1 = (int32_t) asr_s64(vproduct1x1 + rounding, shift);
- int32_t vout1x2 = (int32_t) asr_s64(vproduct1x2 + rounding, shift);
- int32_t vout1x3 = (int32_t) asr_s64(vproduct1x3 + rounding, shift);
-
- const int32_t voutput_min_less_zero_point = params->rndnu_scalar.output_min_less_zero_point;
- vout0x0 = math_max_s32(vout0x0, voutput_min_less_zero_point);
- vout0x1 = math_max_s32(vout0x1, voutput_min_less_zero_point);
- vout0x2 = math_max_s32(vout0x2, voutput_min_less_zero_point);
- vout0x3 = math_max_s32(vout0x3, voutput_min_less_zero_point);
- vout1x0 = math_max_s32(vout1x0, voutput_min_less_zero_point);
- vout1x1 = math_max_s32(vout1x1, voutput_min_less_zero_point);
- vout1x2 = math_max_s32(vout1x2, voutput_min_less_zero_point);
- vout1x3 = math_max_s32(vout1x3, voutput_min_less_zero_point);
-
- const int32_t voutput_max_less_zero_point = params->rndnu_scalar.output_max_less_zero_point;
- vout0x0 = math_min_s32(vout0x0, voutput_max_less_zero_point);
- vout0x1 = math_min_s32(vout0x1, voutput_max_less_zero_point);
- vout0x2 = math_min_s32(vout0x2, voutput_max_less_zero_point);
- vout0x3 = math_min_s32(vout0x3, voutput_max_less_zero_point);
- vout1x0 = math_min_s32(vout1x0, voutput_max_less_zero_point);
- vout1x1 = math_min_s32(vout1x1, voutput_max_less_zero_point);
- vout1x2 = math_min_s32(vout1x2, voutput_max_less_zero_point);
- vout1x3 = math_min_s32(vout1x3, voutput_max_less_zero_point);
-
- const int32_t voutput_zero_point = params->rndnu_scalar.output_zero_point;
- vout0x0 += voutput_zero_point;
- vout0x1 += voutput_zero_point;
- vout0x2 += voutput_zero_point;
- vout0x3 += voutput_zero_point;
- vout1x0 += voutput_zero_point;
- vout1x1 += voutput_zero_point;
- vout1x2 += voutput_zero_point;
- vout1x3 += voutput_zero_point;
-
- if XNN_LIKELY(nc >= 4) {
- c1[0] = (int8_t) vout1x0;
- c1[1] = (int8_t) vout1x1;
- c1[2] = (int8_t) vout1x2;
- c1[3] = (int8_t) vout1x3;
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
- c0[2] = (int8_t) vout0x2;
- c0[3] = (int8_t) vout0x3;
-
- c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
- c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
-
- a = (const int8_t**restrict) ((uintptr_t) a - ks);
- nc -= 4;
- } else {
- if (nc & 2) {
- c1[0] = (int8_t) vout1x0;
- c1[1] = (int8_t) vout1x1;
- vout1x0 = vout1x2;
- c1 += 2;
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
- vout0x0 = vout0x2;
- c0 += 2;
- }
- if (nc & 1) {
- c1[0] = (int8_t) vout1x0;
- c0[0] = (int8_t) vout0x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qs8-igemm/gen/3x2-minmax-fp32-scalar-magic.c b/src/qs8-igemm/gen/3x2-minmax-fp32-scalar-fmagic.c
similarity index 94%
rename from src/qs8-igemm/gen/3x2-minmax-fp32-scalar-magic.c
rename to src/qs8-igemm/gen/3x2-minmax-fp32-scalar-fmagic.c
index 287221b..ca54a75 100644
--- a/src/qs8-igemm/gen/3x2-minmax-fp32-scalar-magic.c
+++ b/src/qs8-igemm/gen/3x2-minmax-fp32-scalar-fmagic.c
@@ -15,7 +15,7 @@
#include <xnnpack/gemm.h>
-void xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_magic(
+void xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic(
size_t mr,
size_t nc,
size_t kc,
@@ -106,7 +106,7 @@
float vfpacc2x0 = (float) vacc2x0;
float vfpacc2x1 = (float) vacc2x1;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_fmagic.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc1x0 *= vscale;
@@ -114,7 +114,7 @@
vfpacc2x0 *= vscale;
vfpacc2x1 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point);
@@ -122,7 +122,7 @@
vfpacc2x0 = math_max_f32(vfpacc2x0, voutput_min_less_zero_point);
vfpacc2x1 = math_max_f32(vfpacc2x1, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point);
@@ -130,7 +130,7 @@
vfpacc2x0 = math_min_f32(vfpacc2x0, voutput_max_less_zero_point);
vfpacc2x1 = math_min_f32(vfpacc2x1, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
+ const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc1x0 += vmagic_bias;
@@ -138,7 +138,7 @@
vfpacc2x0 += vmagic_bias;
vfpacc2x1 += vmagic_bias;
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
+ const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
int32_t vout1x0 = (int32_t) fp32_to_bits(vfpacc1x0) - vmagic_bias_less_output_zero_point;
diff --git a/src/qs8-igemm/gen/3x2-minmax-fp32-scalar-lrint.c b/src/qs8-igemm/gen/3x2-minmax-fp32-scalar-lrint.c
deleted file mode 100644
index 8c87cd3..0000000
--- a/src/qs8-igemm/gen/3x2-minmax-fp32-scalar-lrint.c
+++ /dev/null
@@ -1,171 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-igemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-#include <math.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint(
- size_t mr,
- size_t nc,
- size_t kc,
- size_t ks,
- const int8_t**restrict a,
- const void*restrict w,
- int8_t*restrict c,
- size_t cm_stride,
- size_t cn_stride,
- size_t a_offset,
- const int8_t* zero,
- const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 3);
- assert(nc != 0);
- assert(kc != 0);
- assert(ks != 0);
- assert(ks % (3 * sizeof(void*)) == 0);
- assert(a != NULL);
- assert(w != NULL);
- assert(c != NULL);
-
- int8_t* c0 = c;
- int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
- if XNN_UNPREDICTABLE(mr < 2) {
- c1 = c0;
- }
- int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
- if XNN_UNPREDICTABLE(mr <= 2) {
- c2 = c1;
- }
-
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- int32_t vacc1x0 = vacc0x0;
- int32_t vacc1x1 = vacc0x1;
- int32_t vacc2x0 = vacc0x0;
- int32_t vacc2x1 = vacc0x1;
- w = (const void*) ((const int32_t*) w + 2);
-
- size_t p = ks;
- do {
- const int8_t* restrict a0 = a[0];
- assert(a0 != NULL);
- if XNN_UNPREDICTABLE(a0 != zero) {
- a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
- }
- const int8_t* restrict a1 = a[1];
- assert(a1 != NULL);
- if XNN_UNPREDICTABLE(a1 != zero) {
- a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
- }
- const int8_t* restrict a2 = a[2];
- assert(a2 != NULL);
- if XNN_UNPREDICTABLE(a2 != zero) {
- a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
- }
- a += 3;
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) *a0++;
- const int32_t va1 = (int32_t) *a1++;
- const int32_t va2 = (int32_t) *a2++;
-
- const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
- const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
- w = (const void*) ((const int8_t*) w + 2);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
- vacc1x0 += va1 * vb0;
- vacc1x1 += va1 * vb1;
- vacc2x0 += va2 * vb0;
- vacc2x1 += va2 * vb1;
-
- k -= sizeof(int8_t);
- } while (k != 0);
- p -= 3 * sizeof(void*);
- } while (p != 0);
-
- float vfpacc0x0 = (float) vacc0x0;
- float vfpacc0x1 = (float) vacc0x1;
- float vfpacc1x0 = (float) vacc1x0;
- float vfpacc1x1 = (float) vacc1x1;
- float vfpacc2x0 = (float) vacc2x0;
- float vfpacc2x1 = (float) vacc2x1;
-
- const float vscale = params->fp32_scalar_lrint.scale;
- vfpacc0x0 *= vscale;
- vfpacc0x1 *= vscale;
- vfpacc1x0 *= vscale;
- vfpacc1x1 *= vscale;
- vfpacc2x0 *= vscale;
- vfpacc2x1 *= vscale;
-
- long vrndacc0x0 = lrintf(vfpacc0x0);
- long vrndacc0x1 = lrintf(vfpacc0x1);
- long vrndacc1x0 = lrintf(vfpacc1x0);
- long vrndacc1x1 = lrintf(vfpacc1x1);
- long vrndacc2x0 = lrintf(vfpacc2x0);
- long vrndacc2x1 = lrintf(vfpacc2x1);
-
- const long voutput_min_less_zero_point = params->fp32_scalar_lrint.output_min_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x1;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x1;
- vrndacc2x0 = XNN_UNPREDICTABLE(vrndacc2x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x0;
- vrndacc2x1 = XNN_UNPREDICTABLE(vrndacc2x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x1;
-
- const long voutput_max_less_zero_point = params->fp32_scalar_lrint.output_max_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x1;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x1;
- vrndacc2x0 = XNN_UNPREDICTABLE(vrndacc2x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x0;
- vrndacc2x1 = XNN_UNPREDICTABLE(vrndacc2x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x1;
-
- const int32_t voutput_zero_point = params->fp32_scalar_lrint.output_zero_point;
- int32_t vout0x0 = (int32_t) vrndacc0x0 + voutput_zero_point;
- int32_t vout0x1 = (int32_t) vrndacc0x1 + voutput_zero_point;
- int32_t vout1x0 = (int32_t) vrndacc1x0 + voutput_zero_point;
- int32_t vout1x1 = (int32_t) vrndacc1x1 + voutput_zero_point;
- int32_t vout2x0 = (int32_t) vrndacc2x0 + voutput_zero_point;
- int32_t vout2x1 = (int32_t) vrndacc2x1 + voutput_zero_point;
-
- if XNN_LIKELY(nc >= 2) {
- c2[0] = (int8_t) vout2x0;
- c2[1] = (int8_t) vout2x1;
- c1[0] = (int8_t) vout1x0;
- c1[1] = (int8_t) vout1x1;
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
-
- c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
- c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
- c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
-
- a = (const int8_t**restrict) ((uintptr_t) a - ks);
- nc -= 2;
- } else {
- if (nc & 1) {
- c2[0] = (int8_t) vout2x0;
- c1[0] = (int8_t) vout1x0;
- c0[0] = (int8_t) vout0x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qs8-igemm/gen/3x2-minmax-fp32-scalar-magic.c b/src/qs8-igemm/gen/3x2-minmax-fp32-scalar-lrintf.c
similarity index 79%
copy from src/qs8-igemm/gen/3x2-minmax-fp32-scalar-magic.c
copy to src/qs8-igemm/gen/3x2-minmax-fp32-scalar-lrintf.c
index 287221b..5472ba3 100644
--- a/src/qs8-igemm/gen/3x2-minmax-fp32-scalar-magic.c
+++ b/src/qs8-igemm/gen/3x2-minmax-fp32-scalar-lrintf.c
@@ -8,14 +8,13 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-
-#include <fp16.h>
+#include <math.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
-void xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_magic(
+void xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
@@ -106,7 +105,7 @@
float vfpacc2x0 = (float) vacc2x0;
float vfpacc2x1 = (float) vacc2x1;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_lrintf.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc1x0 *= vscale;
@@ -114,7 +113,7 @@
vfpacc2x0 *= vscale;
vfpacc2x1 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point);
@@ -122,7 +121,7 @@
vfpacc2x0 = math_max_f32(vfpacc2x0, voutput_min_less_zero_point);
vfpacc2x1 = math_max_f32(vfpacc2x1, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point);
@@ -130,21 +129,20 @@
vfpacc2x0 = math_min_f32(vfpacc2x0, voutput_max_less_zero_point);
vfpacc2x1 = math_min_f32(vfpacc2x1, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
- vfpacc0x0 += vmagic_bias;
- vfpacc0x1 += vmagic_bias;
- vfpacc1x0 += vmagic_bias;
- vfpacc1x1 += vmagic_bias;
- vfpacc2x0 += vmagic_bias;
- vfpacc2x1 += vmagic_bias;
+ const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
+ const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
+ const int32_t vrndacc1x0 = (int32_t) lrintf(vfpacc1x0);
+ const int32_t vrndacc1x1 = (int32_t) lrintf(vfpacc1x1);
+ const int32_t vrndacc2x0 = (int32_t) lrintf(vfpacc2x0);
+ const int32_t vrndacc2x1 = (int32_t) lrintf(vfpacc2x1);
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
- int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
- int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
- int32_t vout1x0 = (int32_t) fp32_to_bits(vfpacc1x0) - vmagic_bias_less_output_zero_point;
- int32_t vout1x1 = (int32_t) fp32_to_bits(vfpacc1x1) - vmagic_bias_less_output_zero_point;
- int32_t vout2x0 = (int32_t) fp32_to_bits(vfpacc2x0) - vmagic_bias_less_output_zero_point;
- int32_t vout2x1 = (int32_t) fp32_to_bits(vfpacc2x1) - vmagic_bias_less_output_zero_point;
+ const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
+ int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
+ int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
+ int32_t vout1x0 = vrndacc1x0 + voutput_zero_point;
+ int32_t vout1x1 = vrndacc1x1 + voutput_zero_point;
+ int32_t vout2x0 = vrndacc2x0 + voutput_zero_point;
+ int32_t vout2x1 = vrndacc2x1 + voutput_zero_point;
if XNN_LIKELY(nc >= 2) {
c2[0] = (int8_t) vout2x0;
diff --git a/src/qs8-igemm/gen/3x2-minmax-rndnu-scalar.c b/src/qs8-igemm/gen/3x2-minmax-rndnu-scalar.c
deleted file mode 100644
index 6e666fe..0000000
--- a/src/qs8-igemm/gen/3x2-minmax-rndnu-scalar.c
+++ /dev/null
@@ -1,165 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-igemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qs8_igemm_minmax_rndnu_ukernel_3x2__scalar(
- size_t mr,
- size_t nc,
- size_t kc,
- size_t ks,
- const int8_t**restrict a,
- const void*restrict w,
- int8_t*restrict c,
- size_t cm_stride,
- size_t cn_stride,
- size_t a_offset,
- const int8_t* zero,
- const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 3);
- assert(nc != 0);
- assert(kc != 0);
- assert(ks != 0);
- assert(ks % (3 * sizeof(void*)) == 0);
- assert(a != NULL);
- assert(w != NULL);
- assert(c != NULL);
-
- int8_t* c0 = c;
- int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
- if XNN_UNPREDICTABLE(mr < 2) {
- c1 = c0;
- }
- int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
- if XNN_UNPREDICTABLE(mr <= 2) {
- c2 = c1;
- }
-
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- int32_t vacc1x0 = vacc0x0;
- int32_t vacc1x1 = vacc0x1;
- int32_t vacc2x0 = vacc0x0;
- int32_t vacc2x1 = vacc0x1;
- w = (const void*) ((const int32_t*) w + 2);
-
- size_t p = ks;
- do {
- const int8_t* restrict a0 = a[0];
- assert(a0 != NULL);
- if XNN_UNPREDICTABLE(a0 != zero) {
- a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
- }
- const int8_t* restrict a1 = a[1];
- assert(a1 != NULL);
- if XNN_UNPREDICTABLE(a1 != zero) {
- a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
- }
- const int8_t* restrict a2 = a[2];
- assert(a2 != NULL);
- if XNN_UNPREDICTABLE(a2 != zero) {
- a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
- }
- a += 3;
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) *a0++;
- const int32_t va1 = (int32_t) *a1++;
- const int32_t va2 = (int32_t) *a2++;
-
- const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
- const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
- w = (const void*) ((const int8_t*) w + 2);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
- vacc1x0 += va1 * vb0;
- vacc1x1 += va1 * vb1;
- vacc2x0 += va2 * vb0;
- vacc2x1 += va2 * vb1;
-
- k -= sizeof(int8_t);
- } while (k != 0);
- p -= 3 * sizeof(void*);
- } while (p != 0);
-
- const int32_t vmultiplier = params->rndnu_scalar.multiplier;
- const int64_t vproduct0x0 = (int64_t) vacc0x0 * (int64_t) vmultiplier;
- const int64_t vproduct0x1 = (int64_t) vacc0x1 * (int64_t) vmultiplier;
- const int64_t vproduct1x0 = (int64_t) vacc1x0 * (int64_t) vmultiplier;
- const int64_t vproduct1x1 = (int64_t) vacc1x1 * (int64_t) vmultiplier;
- const int64_t vproduct2x0 = (int64_t) vacc2x0 * (int64_t) vmultiplier;
- const int64_t vproduct2x1 = (int64_t) vacc2x1 * (int64_t) vmultiplier;
-
- const uint32_t shift = params->rndnu_scalar.shift;
- const int64_t rounding = params->rndnu_scalar.rounding;
- int32_t vout0x0 = (int32_t) asr_s64(vproduct0x0 + rounding, shift);
- int32_t vout0x1 = (int32_t) asr_s64(vproduct0x1 + rounding, shift);
- int32_t vout1x0 = (int32_t) asr_s64(vproduct1x0 + rounding, shift);
- int32_t vout1x1 = (int32_t) asr_s64(vproduct1x1 + rounding, shift);
- int32_t vout2x0 = (int32_t) asr_s64(vproduct2x0 + rounding, shift);
- int32_t vout2x1 = (int32_t) asr_s64(vproduct2x1 + rounding, shift);
-
- const int32_t voutput_min_less_zero_point = params->rndnu_scalar.output_min_less_zero_point;
- vout0x0 = math_max_s32(vout0x0, voutput_min_less_zero_point);
- vout0x1 = math_max_s32(vout0x1, voutput_min_less_zero_point);
- vout1x0 = math_max_s32(vout1x0, voutput_min_less_zero_point);
- vout1x1 = math_max_s32(vout1x1, voutput_min_less_zero_point);
- vout2x0 = math_max_s32(vout2x0, voutput_min_less_zero_point);
- vout2x1 = math_max_s32(vout2x1, voutput_min_less_zero_point);
-
- const int32_t voutput_max_less_zero_point = params->rndnu_scalar.output_max_less_zero_point;
- vout0x0 = math_min_s32(vout0x0, voutput_max_less_zero_point);
- vout0x1 = math_min_s32(vout0x1, voutput_max_less_zero_point);
- vout1x0 = math_min_s32(vout1x0, voutput_max_less_zero_point);
- vout1x1 = math_min_s32(vout1x1, voutput_max_less_zero_point);
- vout2x0 = math_min_s32(vout2x0, voutput_max_less_zero_point);
- vout2x1 = math_min_s32(vout2x1, voutput_max_less_zero_point);
-
- const int32_t voutput_zero_point = params->rndnu_scalar.output_zero_point;
- vout0x0 += voutput_zero_point;
- vout0x1 += voutput_zero_point;
- vout1x0 += voutput_zero_point;
- vout1x1 += voutput_zero_point;
- vout2x0 += voutput_zero_point;
- vout2x1 += voutput_zero_point;
-
- if XNN_LIKELY(nc >= 2) {
- c2[0] = (int8_t) vout2x0;
- c2[1] = (int8_t) vout2x1;
- c1[0] = (int8_t) vout1x0;
- c1[1] = (int8_t) vout1x1;
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
-
- c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
- c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
- c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
-
- a = (const int8_t**restrict) ((uintptr_t) a - ks);
- nc -= 2;
- } else {
- if (nc & 1) {
- c2[0] = (int8_t) vout2x0;
- c1[0] = (int8_t) vout1x0;
- c0[0] = (int8_t) vout0x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qs8-igemm/gen/3x4-minmax-fp32-scalar-magic.c b/src/qs8-igemm/gen/3x4-minmax-fp32-scalar-fmagic.c
similarity index 96%
rename from src/qs8-igemm/gen/3x4-minmax-fp32-scalar-magic.c
rename to src/qs8-igemm/gen/3x4-minmax-fp32-scalar-fmagic.c
index a3812a0..7102beb 100644
--- a/src/qs8-igemm/gen/3x4-minmax-fp32-scalar-magic.c
+++ b/src/qs8-igemm/gen/3x4-minmax-fp32-scalar-fmagic.c
@@ -15,7 +15,7 @@
#include <xnnpack/gemm.h>
-void xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_magic(
+void xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic(
size_t mr,
size_t nc,
size_t kc,
@@ -126,7 +126,7 @@
float vfpacc2x2 = (float) vacc2x2;
float vfpacc2x3 = (float) vacc2x3;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_fmagic.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc0x2 *= vscale;
@@ -140,7 +140,7 @@
vfpacc2x2 *= vscale;
vfpacc2x3 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
@@ -154,7 +154,7 @@
vfpacc2x2 = math_max_f32(vfpacc2x2, voutput_min_less_zero_point);
vfpacc2x3 = math_max_f32(vfpacc2x3, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
@@ -168,7 +168,7 @@
vfpacc2x2 = math_min_f32(vfpacc2x2, voutput_max_less_zero_point);
vfpacc2x3 = math_min_f32(vfpacc2x3, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
+ const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc0x2 += vmagic_bias;
@@ -182,7 +182,7 @@
vfpacc2x2 += vmagic_bias;
vfpacc2x3 += vmagic_bias;
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
+ const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
int32_t vout0x2 = (int32_t) fp32_to_bits(vfpacc0x2) - vmagic_bias_less_output_zero_point;
diff --git a/src/qs8-igemm/gen/3x4-minmax-fp32-scalar-lrint.c b/src/qs8-igemm/gen/3x4-minmax-fp32-scalar-lrint.c
deleted file mode 100644
index 7cbf62b..0000000
--- a/src/qs8-igemm/gen/3x4-minmax-fp32-scalar-lrint.c
+++ /dev/null
@@ -1,241 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-igemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-#include <math.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint(
- size_t mr,
- size_t nc,
- size_t kc,
- size_t ks,
- const int8_t**restrict a,
- const void*restrict w,
- int8_t*restrict c,
- size_t cm_stride,
- size_t cn_stride,
- size_t a_offset,
- const int8_t* zero,
- const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 3);
- assert(nc != 0);
- assert(kc != 0);
- assert(ks != 0);
- assert(ks % (3 * sizeof(void*)) == 0);
- assert(a != NULL);
- assert(w != NULL);
- assert(c != NULL);
-
- int8_t* c0 = c;
- int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
- if XNN_UNPREDICTABLE(mr < 2) {
- c1 = c0;
- }
- int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
- if XNN_UNPREDICTABLE(mr <= 2) {
- c2 = c1;
- }
-
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- int32_t vacc0x2 = ((const int32_t*) w)[2];
- int32_t vacc0x3 = ((const int32_t*) w)[3];
- int32_t vacc1x0 = vacc0x0;
- int32_t vacc1x1 = vacc0x1;
- int32_t vacc1x2 = vacc0x2;
- int32_t vacc1x3 = vacc0x3;
- int32_t vacc2x0 = vacc0x0;
- int32_t vacc2x1 = vacc0x1;
- int32_t vacc2x2 = vacc0x2;
- int32_t vacc2x3 = vacc0x3;
- w = (const void*) ((const int32_t*) w + 4);
-
- size_t p = ks;
- do {
- const int8_t* restrict a0 = a[0];
- assert(a0 != NULL);
- if XNN_UNPREDICTABLE(a0 != zero) {
- a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
- }
- const int8_t* restrict a1 = a[1];
- assert(a1 != NULL);
- if XNN_UNPREDICTABLE(a1 != zero) {
- a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
- }
- const int8_t* restrict a2 = a[2];
- assert(a2 != NULL);
- if XNN_UNPREDICTABLE(a2 != zero) {
- a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
- }
- a += 3;
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) *a0++;
- const int32_t va1 = (int32_t) *a1++;
- const int32_t va2 = (int32_t) *a2++;
-
- const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
- const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
- const int32_t vb2 = (int32_t) ((const int8_t*) w)[2];
- const int32_t vb3 = (int32_t) ((const int8_t*) w)[3];
- w = (const void*) ((const int8_t*) w + 4);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
- vacc0x2 += va0 * vb2;
- vacc0x3 += va0 * vb3;
- vacc1x0 += va1 * vb0;
- vacc1x1 += va1 * vb1;
- vacc1x2 += va1 * vb2;
- vacc1x3 += va1 * vb3;
- vacc2x0 += va2 * vb0;
- vacc2x1 += va2 * vb1;
- vacc2x2 += va2 * vb2;
- vacc2x3 += va2 * vb3;
-
- k -= sizeof(int8_t);
- } while (k != 0);
- p -= 3 * sizeof(void*);
- } while (p != 0);
-
- float vfpacc0x0 = (float) vacc0x0;
- float vfpacc0x1 = (float) vacc0x1;
- float vfpacc0x2 = (float) vacc0x2;
- float vfpacc0x3 = (float) vacc0x3;
- float vfpacc1x0 = (float) vacc1x0;
- float vfpacc1x1 = (float) vacc1x1;
- float vfpacc1x2 = (float) vacc1x2;
- float vfpacc1x3 = (float) vacc1x3;
- float vfpacc2x0 = (float) vacc2x0;
- float vfpacc2x1 = (float) vacc2x1;
- float vfpacc2x2 = (float) vacc2x2;
- float vfpacc2x3 = (float) vacc2x3;
-
- const float vscale = params->fp32_scalar_lrint.scale;
- vfpacc0x0 *= vscale;
- vfpacc0x1 *= vscale;
- vfpacc0x2 *= vscale;
- vfpacc0x3 *= vscale;
- vfpacc1x0 *= vscale;
- vfpacc1x1 *= vscale;
- vfpacc1x2 *= vscale;
- vfpacc1x3 *= vscale;
- vfpacc2x0 *= vscale;
- vfpacc2x1 *= vscale;
- vfpacc2x2 *= vscale;
- vfpacc2x3 *= vscale;
-
- long vrndacc0x0 = lrintf(vfpacc0x0);
- long vrndacc0x1 = lrintf(vfpacc0x1);
- long vrndacc0x2 = lrintf(vfpacc0x2);
- long vrndacc0x3 = lrintf(vfpacc0x3);
- long vrndacc1x0 = lrintf(vfpacc1x0);
- long vrndacc1x1 = lrintf(vfpacc1x1);
- long vrndacc1x2 = lrintf(vfpacc1x2);
- long vrndacc1x3 = lrintf(vfpacc1x3);
- long vrndacc2x0 = lrintf(vfpacc2x0);
- long vrndacc2x1 = lrintf(vfpacc2x1);
- long vrndacc2x2 = lrintf(vfpacc2x2);
- long vrndacc2x3 = lrintf(vfpacc2x3);
-
- const long voutput_min_less_zero_point = params->fp32_scalar_lrint.output_min_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x1;
- vrndacc0x2 = XNN_UNPREDICTABLE(vrndacc0x2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x2;
- vrndacc0x3 = XNN_UNPREDICTABLE(vrndacc0x3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x3;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x1;
- vrndacc1x2 = XNN_UNPREDICTABLE(vrndacc1x2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x2;
- vrndacc1x3 = XNN_UNPREDICTABLE(vrndacc1x3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x3;
- vrndacc2x0 = XNN_UNPREDICTABLE(vrndacc2x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x0;
- vrndacc2x1 = XNN_UNPREDICTABLE(vrndacc2x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x1;
- vrndacc2x2 = XNN_UNPREDICTABLE(vrndacc2x2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x2;
- vrndacc2x3 = XNN_UNPREDICTABLE(vrndacc2x3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x3;
-
- const long voutput_max_less_zero_point = params->fp32_scalar_lrint.output_max_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x1;
- vrndacc0x2 = XNN_UNPREDICTABLE(vrndacc0x2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x2;
- vrndacc0x3 = XNN_UNPREDICTABLE(vrndacc0x3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x3;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x1;
- vrndacc1x2 = XNN_UNPREDICTABLE(vrndacc1x2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x2;
- vrndacc1x3 = XNN_UNPREDICTABLE(vrndacc1x3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x3;
- vrndacc2x0 = XNN_UNPREDICTABLE(vrndacc2x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x0;
- vrndacc2x1 = XNN_UNPREDICTABLE(vrndacc2x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x1;
- vrndacc2x2 = XNN_UNPREDICTABLE(vrndacc2x2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x2;
- vrndacc2x3 = XNN_UNPREDICTABLE(vrndacc2x3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x3;
-
- const int32_t voutput_zero_point = params->fp32_scalar_lrint.output_zero_point;
- int32_t vout0x0 = (int32_t) vrndacc0x0 + voutput_zero_point;
- int32_t vout0x1 = (int32_t) vrndacc0x1 + voutput_zero_point;
- int32_t vout0x2 = (int32_t) vrndacc0x2 + voutput_zero_point;
- int32_t vout0x3 = (int32_t) vrndacc0x3 + voutput_zero_point;
- int32_t vout1x0 = (int32_t) vrndacc1x0 + voutput_zero_point;
- int32_t vout1x1 = (int32_t) vrndacc1x1 + voutput_zero_point;
- int32_t vout1x2 = (int32_t) vrndacc1x2 + voutput_zero_point;
- int32_t vout1x3 = (int32_t) vrndacc1x3 + voutput_zero_point;
- int32_t vout2x0 = (int32_t) vrndacc2x0 + voutput_zero_point;
- int32_t vout2x1 = (int32_t) vrndacc2x1 + voutput_zero_point;
- int32_t vout2x2 = (int32_t) vrndacc2x2 + voutput_zero_point;
- int32_t vout2x3 = (int32_t) vrndacc2x3 + voutput_zero_point;
-
- if XNN_LIKELY(nc >= 4) {
- c2[0] = (int8_t) vout2x0;
- c2[1] = (int8_t) vout2x1;
- c2[2] = (int8_t) vout2x2;
- c2[3] = (int8_t) vout2x3;
- c1[0] = (int8_t) vout1x0;
- c1[1] = (int8_t) vout1x1;
- c1[2] = (int8_t) vout1x2;
- c1[3] = (int8_t) vout1x3;
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
- c0[2] = (int8_t) vout0x2;
- c0[3] = (int8_t) vout0x3;
-
- c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
- c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
- c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
-
- a = (const int8_t**restrict) ((uintptr_t) a - ks);
- nc -= 4;
- } else {
- if (nc & 2) {
- c2[0] = (int8_t) vout2x0;
- c2[1] = (int8_t) vout2x1;
- vout2x0 = vout2x2;
- c2 += 2;
- c1[0] = (int8_t) vout1x0;
- c1[1] = (int8_t) vout1x1;
- vout1x0 = vout1x2;
- c1 += 2;
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
- vout0x0 = vout0x2;
- c0 += 2;
- }
- if (nc & 1) {
- c2[0] = (int8_t) vout2x0;
- c1[0] = (int8_t) vout1x0;
- c0[0] = (int8_t) vout0x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qs8-igemm/gen/3x4-minmax-fp32-scalar-magic.c b/src/qs8-igemm/gen/3x4-minmax-fp32-scalar-lrintf.c
similarity index 78%
copy from src/qs8-igemm/gen/3x4-minmax-fp32-scalar-magic.c
copy to src/qs8-igemm/gen/3x4-minmax-fp32-scalar-lrintf.c
index a3812a0..aa10244 100644
--- a/src/qs8-igemm/gen/3x4-minmax-fp32-scalar-magic.c
+++ b/src/qs8-igemm/gen/3x4-minmax-fp32-scalar-lrintf.c
@@ -8,14 +8,13 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-
-#include <fp16.h>
+#include <math.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
-void xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_magic(
+void xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
@@ -126,7 +125,7 @@
float vfpacc2x2 = (float) vacc2x2;
float vfpacc2x3 = (float) vacc2x3;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_lrintf.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc0x2 *= vscale;
@@ -140,7 +139,7 @@
vfpacc2x2 *= vscale;
vfpacc2x3 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
@@ -154,7 +153,7 @@
vfpacc2x2 = math_max_f32(vfpacc2x2, voutput_min_less_zero_point);
vfpacc2x3 = math_max_f32(vfpacc2x3, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
@@ -168,33 +167,32 @@
vfpacc2x2 = math_min_f32(vfpacc2x2, voutput_max_less_zero_point);
vfpacc2x3 = math_min_f32(vfpacc2x3, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
- vfpacc0x0 += vmagic_bias;
- vfpacc0x1 += vmagic_bias;
- vfpacc0x2 += vmagic_bias;
- vfpacc0x3 += vmagic_bias;
- vfpacc1x0 += vmagic_bias;
- vfpacc1x1 += vmagic_bias;
- vfpacc1x2 += vmagic_bias;
- vfpacc1x3 += vmagic_bias;
- vfpacc2x0 += vmagic_bias;
- vfpacc2x1 += vmagic_bias;
- vfpacc2x2 += vmagic_bias;
- vfpacc2x3 += vmagic_bias;
+ const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
+ const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
+ const int32_t vrndacc0x2 = (int32_t) lrintf(vfpacc0x2);
+ const int32_t vrndacc0x3 = (int32_t) lrintf(vfpacc0x3);
+ const int32_t vrndacc1x0 = (int32_t) lrintf(vfpacc1x0);
+ const int32_t vrndacc1x1 = (int32_t) lrintf(vfpacc1x1);
+ const int32_t vrndacc1x2 = (int32_t) lrintf(vfpacc1x2);
+ const int32_t vrndacc1x3 = (int32_t) lrintf(vfpacc1x3);
+ const int32_t vrndacc2x0 = (int32_t) lrintf(vfpacc2x0);
+ const int32_t vrndacc2x1 = (int32_t) lrintf(vfpacc2x1);
+ const int32_t vrndacc2x2 = (int32_t) lrintf(vfpacc2x2);
+ const int32_t vrndacc2x3 = (int32_t) lrintf(vfpacc2x3);
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
- int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
- int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
- int32_t vout0x2 = (int32_t) fp32_to_bits(vfpacc0x2) - vmagic_bias_less_output_zero_point;
- int32_t vout0x3 = (int32_t) fp32_to_bits(vfpacc0x3) - vmagic_bias_less_output_zero_point;
- int32_t vout1x0 = (int32_t) fp32_to_bits(vfpacc1x0) - vmagic_bias_less_output_zero_point;
- int32_t vout1x1 = (int32_t) fp32_to_bits(vfpacc1x1) - vmagic_bias_less_output_zero_point;
- int32_t vout1x2 = (int32_t) fp32_to_bits(vfpacc1x2) - vmagic_bias_less_output_zero_point;
- int32_t vout1x3 = (int32_t) fp32_to_bits(vfpacc1x3) - vmagic_bias_less_output_zero_point;
- int32_t vout2x0 = (int32_t) fp32_to_bits(vfpacc2x0) - vmagic_bias_less_output_zero_point;
- int32_t vout2x1 = (int32_t) fp32_to_bits(vfpacc2x1) - vmagic_bias_less_output_zero_point;
- int32_t vout2x2 = (int32_t) fp32_to_bits(vfpacc2x2) - vmagic_bias_less_output_zero_point;
- int32_t vout2x3 = (int32_t) fp32_to_bits(vfpacc2x3) - vmagic_bias_less_output_zero_point;
+ const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
+ int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
+ int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
+ int32_t vout0x2 = vrndacc0x2 + voutput_zero_point;
+ int32_t vout0x3 = vrndacc0x3 + voutput_zero_point;
+ int32_t vout1x0 = vrndacc1x0 + voutput_zero_point;
+ int32_t vout1x1 = vrndacc1x1 + voutput_zero_point;
+ int32_t vout1x2 = vrndacc1x2 + voutput_zero_point;
+ int32_t vout1x3 = vrndacc1x3 + voutput_zero_point;
+ int32_t vout2x0 = vrndacc2x0 + voutput_zero_point;
+ int32_t vout2x1 = vrndacc2x1 + voutput_zero_point;
+ int32_t vout2x2 = vrndacc2x2 + voutput_zero_point;
+ int32_t vout2x3 = vrndacc2x3 + voutput_zero_point;
if XNN_LIKELY(nc >= 4) {
c2[0] = (int8_t) vout2x0;
diff --git a/src/qs8-igemm/gen/3x4-minmax-rndnu-scalar.c b/src/qs8-igemm/gen/3x4-minmax-rndnu-scalar.c
deleted file mode 100644
index fcb3e77..0000000
--- a/src/qs8-igemm/gen/3x4-minmax-rndnu-scalar.c
+++ /dev/null
@@ -1,229 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-igemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qs8_igemm_minmax_rndnu_ukernel_3x4__scalar(
- size_t mr,
- size_t nc,
- size_t kc,
- size_t ks,
- const int8_t**restrict a,
- const void*restrict w,
- int8_t*restrict c,
- size_t cm_stride,
- size_t cn_stride,
- size_t a_offset,
- const int8_t* zero,
- const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 3);
- assert(nc != 0);
- assert(kc != 0);
- assert(ks != 0);
- assert(ks % (3 * sizeof(void*)) == 0);
- assert(a != NULL);
- assert(w != NULL);
- assert(c != NULL);
-
- int8_t* c0 = c;
- int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
- if XNN_UNPREDICTABLE(mr < 2) {
- c1 = c0;
- }
- int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
- if XNN_UNPREDICTABLE(mr <= 2) {
- c2 = c1;
- }
-
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- int32_t vacc0x2 = ((const int32_t*) w)[2];
- int32_t vacc0x3 = ((const int32_t*) w)[3];
- int32_t vacc1x0 = vacc0x0;
- int32_t vacc1x1 = vacc0x1;
- int32_t vacc1x2 = vacc0x2;
- int32_t vacc1x3 = vacc0x3;
- int32_t vacc2x0 = vacc0x0;
- int32_t vacc2x1 = vacc0x1;
- int32_t vacc2x2 = vacc0x2;
- int32_t vacc2x3 = vacc0x3;
- w = (const void*) ((const int32_t*) w + 4);
-
- size_t p = ks;
- do {
- const int8_t* restrict a0 = a[0];
- assert(a0 != NULL);
- if XNN_UNPREDICTABLE(a0 != zero) {
- a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
- }
- const int8_t* restrict a1 = a[1];
- assert(a1 != NULL);
- if XNN_UNPREDICTABLE(a1 != zero) {
- a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
- }
- const int8_t* restrict a2 = a[2];
- assert(a2 != NULL);
- if XNN_UNPREDICTABLE(a2 != zero) {
- a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
- }
- a += 3;
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) *a0++;
- const int32_t va1 = (int32_t) *a1++;
- const int32_t va2 = (int32_t) *a2++;
-
- const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
- const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
- const int32_t vb2 = (int32_t) ((const int8_t*) w)[2];
- const int32_t vb3 = (int32_t) ((const int8_t*) w)[3];
- w = (const void*) ((const int8_t*) w + 4);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
- vacc0x2 += va0 * vb2;
- vacc0x3 += va0 * vb3;
- vacc1x0 += va1 * vb0;
- vacc1x1 += va1 * vb1;
- vacc1x2 += va1 * vb2;
- vacc1x3 += va1 * vb3;
- vacc2x0 += va2 * vb0;
- vacc2x1 += va2 * vb1;
- vacc2x2 += va2 * vb2;
- vacc2x3 += va2 * vb3;
-
- k -= sizeof(int8_t);
- } while (k != 0);
- p -= 3 * sizeof(void*);
- } while (p != 0);
-
- const int32_t vmultiplier = params->rndnu_scalar.multiplier;
- const int64_t vproduct0x0 = (int64_t) vacc0x0 * (int64_t) vmultiplier;
- const int64_t vproduct0x1 = (int64_t) vacc0x1 * (int64_t) vmultiplier;
- const int64_t vproduct0x2 = (int64_t) vacc0x2 * (int64_t) vmultiplier;
- const int64_t vproduct0x3 = (int64_t) vacc0x3 * (int64_t) vmultiplier;
- const int64_t vproduct1x0 = (int64_t) vacc1x0 * (int64_t) vmultiplier;
- const int64_t vproduct1x1 = (int64_t) vacc1x1 * (int64_t) vmultiplier;
- const int64_t vproduct1x2 = (int64_t) vacc1x2 * (int64_t) vmultiplier;
- const int64_t vproduct1x3 = (int64_t) vacc1x3 * (int64_t) vmultiplier;
- const int64_t vproduct2x0 = (int64_t) vacc2x0 * (int64_t) vmultiplier;
- const int64_t vproduct2x1 = (int64_t) vacc2x1 * (int64_t) vmultiplier;
- const int64_t vproduct2x2 = (int64_t) vacc2x2 * (int64_t) vmultiplier;
- const int64_t vproduct2x3 = (int64_t) vacc2x3 * (int64_t) vmultiplier;
-
- const uint32_t shift = params->rndnu_scalar.shift;
- const int64_t rounding = params->rndnu_scalar.rounding;
- int32_t vout0x0 = (int32_t) asr_s64(vproduct0x0 + rounding, shift);
- int32_t vout0x1 = (int32_t) asr_s64(vproduct0x1 + rounding, shift);
- int32_t vout0x2 = (int32_t) asr_s64(vproduct0x2 + rounding, shift);
- int32_t vout0x3 = (int32_t) asr_s64(vproduct0x3 + rounding, shift);
- int32_t vout1x0 = (int32_t) asr_s64(vproduct1x0 + rounding, shift);
- int32_t vout1x1 = (int32_t) asr_s64(vproduct1x1 + rounding, shift);
- int32_t vout1x2 = (int32_t) asr_s64(vproduct1x2 + rounding, shift);
- int32_t vout1x3 = (int32_t) asr_s64(vproduct1x3 + rounding, shift);
- int32_t vout2x0 = (int32_t) asr_s64(vproduct2x0 + rounding, shift);
- int32_t vout2x1 = (int32_t) asr_s64(vproduct2x1 + rounding, shift);
- int32_t vout2x2 = (int32_t) asr_s64(vproduct2x2 + rounding, shift);
- int32_t vout2x3 = (int32_t) asr_s64(vproduct2x3 + rounding, shift);
-
- const int32_t voutput_min_less_zero_point = params->rndnu_scalar.output_min_less_zero_point;
- vout0x0 = math_max_s32(vout0x0, voutput_min_less_zero_point);
- vout0x1 = math_max_s32(vout0x1, voutput_min_less_zero_point);
- vout0x2 = math_max_s32(vout0x2, voutput_min_less_zero_point);
- vout0x3 = math_max_s32(vout0x3, voutput_min_less_zero_point);
- vout1x0 = math_max_s32(vout1x0, voutput_min_less_zero_point);
- vout1x1 = math_max_s32(vout1x1, voutput_min_less_zero_point);
- vout1x2 = math_max_s32(vout1x2, voutput_min_less_zero_point);
- vout1x3 = math_max_s32(vout1x3, voutput_min_less_zero_point);
- vout2x0 = math_max_s32(vout2x0, voutput_min_less_zero_point);
- vout2x1 = math_max_s32(vout2x1, voutput_min_less_zero_point);
- vout2x2 = math_max_s32(vout2x2, voutput_min_less_zero_point);
- vout2x3 = math_max_s32(vout2x3, voutput_min_less_zero_point);
-
- const int32_t voutput_max_less_zero_point = params->rndnu_scalar.output_max_less_zero_point;
- vout0x0 = math_min_s32(vout0x0, voutput_max_less_zero_point);
- vout0x1 = math_min_s32(vout0x1, voutput_max_less_zero_point);
- vout0x2 = math_min_s32(vout0x2, voutput_max_less_zero_point);
- vout0x3 = math_min_s32(vout0x3, voutput_max_less_zero_point);
- vout1x0 = math_min_s32(vout1x0, voutput_max_less_zero_point);
- vout1x1 = math_min_s32(vout1x1, voutput_max_less_zero_point);
- vout1x2 = math_min_s32(vout1x2, voutput_max_less_zero_point);
- vout1x3 = math_min_s32(vout1x3, voutput_max_less_zero_point);
- vout2x0 = math_min_s32(vout2x0, voutput_max_less_zero_point);
- vout2x1 = math_min_s32(vout2x1, voutput_max_less_zero_point);
- vout2x2 = math_min_s32(vout2x2, voutput_max_less_zero_point);
- vout2x3 = math_min_s32(vout2x3, voutput_max_less_zero_point);
-
- const int32_t voutput_zero_point = params->rndnu_scalar.output_zero_point;
- vout0x0 += voutput_zero_point;
- vout0x1 += voutput_zero_point;
- vout0x2 += voutput_zero_point;
- vout0x3 += voutput_zero_point;
- vout1x0 += voutput_zero_point;
- vout1x1 += voutput_zero_point;
- vout1x2 += voutput_zero_point;
- vout1x3 += voutput_zero_point;
- vout2x0 += voutput_zero_point;
- vout2x1 += voutput_zero_point;
- vout2x2 += voutput_zero_point;
- vout2x3 += voutput_zero_point;
-
- if XNN_LIKELY(nc >= 4) {
- c2[0] = (int8_t) vout2x0;
- c2[1] = (int8_t) vout2x1;
- c2[2] = (int8_t) vout2x2;
- c2[3] = (int8_t) vout2x3;
- c1[0] = (int8_t) vout1x0;
- c1[1] = (int8_t) vout1x1;
- c1[2] = (int8_t) vout1x2;
- c1[3] = (int8_t) vout1x3;
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
- c0[2] = (int8_t) vout0x2;
- c0[3] = (int8_t) vout0x3;
-
- c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
- c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
- c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
-
- a = (const int8_t**restrict) ((uintptr_t) a - ks);
- nc -= 4;
- } else {
- if (nc & 2) {
- c2[0] = (int8_t) vout2x0;
- c2[1] = (int8_t) vout2x1;
- vout2x0 = vout2x2;
- c2 += 2;
- c1[0] = (int8_t) vout1x0;
- c1[1] = (int8_t) vout1x1;
- vout1x0 = vout1x2;
- c1 += 2;
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
- vout0x0 = vout0x2;
- c0 += 2;
- }
- if (nc & 1) {
- c2[0] = (int8_t) vout2x0;
- c1[0] = (int8_t) vout1x0;
- c0[0] = (int8_t) vout0x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qs8-igemm/gen/4x2-minmax-fp32-scalar-magic.c b/src/qs8-igemm/gen/4x2-minmax-fp32-scalar-fmagic.c
similarity index 95%
rename from src/qs8-igemm/gen/4x2-minmax-fp32-scalar-magic.c
rename to src/qs8-igemm/gen/4x2-minmax-fp32-scalar-fmagic.c
index e4f79ca..dfb66ed 100644
--- a/src/qs8-igemm/gen/4x2-minmax-fp32-scalar-magic.c
+++ b/src/qs8-igemm/gen/4x2-minmax-fp32-scalar-fmagic.c
@@ -15,7 +15,7 @@
#include <xnnpack/gemm.h>
-void xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_magic(
+void xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic(
size_t mr,
size_t nc,
size_t kc,
@@ -122,7 +122,7 @@
float vfpacc3x0 = (float) vacc3x0;
float vfpacc3x1 = (float) vacc3x1;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_fmagic.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc1x0 *= vscale;
@@ -132,7 +132,7 @@
vfpacc3x0 *= vscale;
vfpacc3x1 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point);
@@ -142,7 +142,7 @@
vfpacc3x0 = math_max_f32(vfpacc3x0, voutput_min_less_zero_point);
vfpacc3x1 = math_max_f32(vfpacc3x1, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point);
@@ -152,7 +152,7 @@
vfpacc3x0 = math_min_f32(vfpacc3x0, voutput_max_less_zero_point);
vfpacc3x1 = math_min_f32(vfpacc3x1, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
+ const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc1x0 += vmagic_bias;
@@ -162,7 +162,7 @@
vfpacc3x0 += vmagic_bias;
vfpacc3x1 += vmagic_bias;
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
+ const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
int32_t vout1x0 = (int32_t) fp32_to_bits(vfpacc1x0) - vmagic_bias_less_output_zero_point;
diff --git a/src/qs8-igemm/gen/4x2-minmax-fp32-scalar-lrint.c b/src/qs8-igemm/gen/4x2-minmax-fp32-scalar-lrint.c
deleted file mode 100644
index dc7011b..0000000
--- a/src/qs8-igemm/gen/4x2-minmax-fp32-scalar-lrint.c
+++ /dev/null
@@ -1,201 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-igemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-#include <math.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint(
- size_t mr,
- size_t nc,
- size_t kc,
- size_t ks,
- const int8_t**restrict a,
- const void*restrict w,
- int8_t*restrict c,
- size_t cm_stride,
- size_t cn_stride,
- size_t a_offset,
- const int8_t* zero,
- const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 4);
- assert(nc != 0);
- assert(kc != 0);
- assert(ks != 0);
- assert(ks % (4 * sizeof(void*)) == 0);
- assert(a != NULL);
- assert(w != NULL);
- assert(c != NULL);
-
- int8_t* c0 = c;
- int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
- if XNN_UNPREDICTABLE(mr < 2) {
- c1 = c0;
- }
- int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
- if XNN_UNPREDICTABLE(mr <= 2) {
- c2 = c1;
- }
- int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
- if XNN_UNPREDICTABLE(mr != 4) {
- c3 = c2;
- }
-
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- int32_t vacc1x0 = vacc0x0;
- int32_t vacc1x1 = vacc0x1;
- int32_t vacc2x0 = vacc0x0;
- int32_t vacc2x1 = vacc0x1;
- int32_t vacc3x0 = vacc0x0;
- int32_t vacc3x1 = vacc0x1;
- w = (const void*) ((const int32_t*) w + 2);
-
- size_t p = ks;
- do {
- const int8_t* restrict a0 = a[0];
- assert(a0 != NULL);
- if XNN_UNPREDICTABLE(a0 != zero) {
- a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
- }
- const int8_t* restrict a1 = a[1];
- assert(a1 != NULL);
- if XNN_UNPREDICTABLE(a1 != zero) {
- a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
- }
- const int8_t* restrict a2 = a[2];
- assert(a2 != NULL);
- if XNN_UNPREDICTABLE(a2 != zero) {
- a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
- }
- const int8_t* restrict a3 = a[3];
- assert(a3 != NULL);
- if XNN_UNPREDICTABLE(a3 != zero) {
- a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
- }
- a += 4;
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) *a0++;
- const int32_t va1 = (int32_t) *a1++;
- const int32_t va2 = (int32_t) *a2++;
- const int32_t va3 = (int32_t) *a3++;
-
- const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
- const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
- w = (const void*) ((const int8_t*) w + 2);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
- vacc1x0 += va1 * vb0;
- vacc1x1 += va1 * vb1;
- vacc2x0 += va2 * vb0;
- vacc2x1 += va2 * vb1;
- vacc3x0 += va3 * vb0;
- vacc3x1 += va3 * vb1;
-
- k -= sizeof(int8_t);
- } while (k != 0);
- p -= 4 * sizeof(void*);
- } while (p != 0);
-
- float vfpacc0x0 = (float) vacc0x0;
- float vfpacc0x1 = (float) vacc0x1;
- float vfpacc1x0 = (float) vacc1x0;
- float vfpacc1x1 = (float) vacc1x1;
- float vfpacc2x0 = (float) vacc2x0;
- float vfpacc2x1 = (float) vacc2x1;
- float vfpacc3x0 = (float) vacc3x0;
- float vfpacc3x1 = (float) vacc3x1;
-
- const float vscale = params->fp32_scalar_lrint.scale;
- vfpacc0x0 *= vscale;
- vfpacc0x1 *= vscale;
- vfpacc1x0 *= vscale;
- vfpacc1x1 *= vscale;
- vfpacc2x0 *= vscale;
- vfpacc2x1 *= vscale;
- vfpacc3x0 *= vscale;
- vfpacc3x1 *= vscale;
-
- long vrndacc0x0 = lrintf(vfpacc0x0);
- long vrndacc0x1 = lrintf(vfpacc0x1);
- long vrndacc1x0 = lrintf(vfpacc1x0);
- long vrndacc1x1 = lrintf(vfpacc1x1);
- long vrndacc2x0 = lrintf(vfpacc2x0);
- long vrndacc2x1 = lrintf(vfpacc2x1);
- long vrndacc3x0 = lrintf(vfpacc3x0);
- long vrndacc3x1 = lrintf(vfpacc3x1);
-
- const long voutput_min_less_zero_point = params->fp32_scalar_lrint.output_min_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x1;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x1;
- vrndacc2x0 = XNN_UNPREDICTABLE(vrndacc2x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x0;
- vrndacc2x1 = XNN_UNPREDICTABLE(vrndacc2x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x1;
- vrndacc3x0 = XNN_UNPREDICTABLE(vrndacc3x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc3x0;
- vrndacc3x1 = XNN_UNPREDICTABLE(vrndacc3x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc3x1;
-
- const long voutput_max_less_zero_point = params->fp32_scalar_lrint.output_max_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x1;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x1;
- vrndacc2x0 = XNN_UNPREDICTABLE(vrndacc2x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x0;
- vrndacc2x1 = XNN_UNPREDICTABLE(vrndacc2x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x1;
- vrndacc3x0 = XNN_UNPREDICTABLE(vrndacc3x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc3x0;
- vrndacc3x1 = XNN_UNPREDICTABLE(vrndacc3x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc3x1;
-
- const int32_t voutput_zero_point = params->fp32_scalar_lrint.output_zero_point;
- int32_t vout0x0 = (int32_t) vrndacc0x0 + voutput_zero_point;
- int32_t vout0x1 = (int32_t) vrndacc0x1 + voutput_zero_point;
- int32_t vout1x0 = (int32_t) vrndacc1x0 + voutput_zero_point;
- int32_t vout1x1 = (int32_t) vrndacc1x1 + voutput_zero_point;
- int32_t vout2x0 = (int32_t) vrndacc2x0 + voutput_zero_point;
- int32_t vout2x1 = (int32_t) vrndacc2x1 + voutput_zero_point;
- int32_t vout3x0 = (int32_t) vrndacc3x0 + voutput_zero_point;
- int32_t vout3x1 = (int32_t) vrndacc3x1 + voutput_zero_point;
-
- if XNN_LIKELY(nc >= 2) {
- c3[0] = (int8_t) vout3x0;
- c3[1] = (int8_t) vout3x1;
- c2[0] = (int8_t) vout2x0;
- c2[1] = (int8_t) vout2x1;
- c1[0] = (int8_t) vout1x0;
- c1[1] = (int8_t) vout1x1;
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
-
- c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
- c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
- c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
- c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
-
- a = (const int8_t**restrict) ((uintptr_t) a - ks);
- nc -= 2;
- } else {
- if (nc & 1) {
- c3[0] = (int8_t) vout3x0;
- c2[0] = (int8_t) vout2x0;
- c1[0] = (int8_t) vout1x0;
- c0[0] = (int8_t) vout0x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qs8-igemm/gen/4x2-minmax-fp32-scalar-magic.c b/src/qs8-igemm/gen/4x2-minmax-fp32-scalar-lrintf.c
similarity index 79%
copy from src/qs8-igemm/gen/4x2-minmax-fp32-scalar-magic.c
copy to src/qs8-igemm/gen/4x2-minmax-fp32-scalar-lrintf.c
index e4f79ca..71654ce 100644
--- a/src/qs8-igemm/gen/4x2-minmax-fp32-scalar-magic.c
+++ b/src/qs8-igemm/gen/4x2-minmax-fp32-scalar-lrintf.c
@@ -8,14 +8,13 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-
-#include <fp16.h>
+#include <math.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
-void xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_magic(
+void xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
@@ -122,7 +121,7 @@
float vfpacc3x0 = (float) vacc3x0;
float vfpacc3x1 = (float) vacc3x1;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_lrintf.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc1x0 *= vscale;
@@ -132,7 +131,7 @@
vfpacc3x0 *= vscale;
vfpacc3x1 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point);
@@ -142,7 +141,7 @@
vfpacc3x0 = math_max_f32(vfpacc3x0, voutput_min_less_zero_point);
vfpacc3x1 = math_max_f32(vfpacc3x1, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point);
@@ -152,25 +151,24 @@
vfpacc3x0 = math_min_f32(vfpacc3x0, voutput_max_less_zero_point);
vfpacc3x1 = math_min_f32(vfpacc3x1, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
- vfpacc0x0 += vmagic_bias;
- vfpacc0x1 += vmagic_bias;
- vfpacc1x0 += vmagic_bias;
- vfpacc1x1 += vmagic_bias;
- vfpacc2x0 += vmagic_bias;
- vfpacc2x1 += vmagic_bias;
- vfpacc3x0 += vmagic_bias;
- vfpacc3x1 += vmagic_bias;
+ const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
+ const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
+ const int32_t vrndacc1x0 = (int32_t) lrintf(vfpacc1x0);
+ const int32_t vrndacc1x1 = (int32_t) lrintf(vfpacc1x1);
+ const int32_t vrndacc2x0 = (int32_t) lrintf(vfpacc2x0);
+ const int32_t vrndacc2x1 = (int32_t) lrintf(vfpacc2x1);
+ const int32_t vrndacc3x0 = (int32_t) lrintf(vfpacc3x0);
+ const int32_t vrndacc3x1 = (int32_t) lrintf(vfpacc3x1);
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
- int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
- int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
- int32_t vout1x0 = (int32_t) fp32_to_bits(vfpacc1x0) - vmagic_bias_less_output_zero_point;
- int32_t vout1x1 = (int32_t) fp32_to_bits(vfpacc1x1) - vmagic_bias_less_output_zero_point;
- int32_t vout2x0 = (int32_t) fp32_to_bits(vfpacc2x0) - vmagic_bias_less_output_zero_point;
- int32_t vout2x1 = (int32_t) fp32_to_bits(vfpacc2x1) - vmagic_bias_less_output_zero_point;
- int32_t vout3x0 = (int32_t) fp32_to_bits(vfpacc3x0) - vmagic_bias_less_output_zero_point;
- int32_t vout3x1 = (int32_t) fp32_to_bits(vfpacc3x1) - vmagic_bias_less_output_zero_point;
+ const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
+ int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
+ int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
+ int32_t vout1x0 = vrndacc1x0 + voutput_zero_point;
+ int32_t vout1x1 = vrndacc1x1 + voutput_zero_point;
+ int32_t vout2x0 = vrndacc2x0 + voutput_zero_point;
+ int32_t vout2x1 = vrndacc2x1 + voutput_zero_point;
+ int32_t vout3x0 = vrndacc3x0 + voutput_zero_point;
+ int32_t vout3x1 = vrndacc3x1 + voutput_zero_point;
if XNN_LIKELY(nc >= 2) {
c3[0] = (int8_t) vout3x0;
diff --git a/src/qs8-igemm/gen/4x2-minmax-rndnu-scalar.c b/src/qs8-igemm/gen/4x2-minmax-rndnu-scalar.c
deleted file mode 100644
index 14b7ba8..0000000
--- a/src/qs8-igemm/gen/4x2-minmax-rndnu-scalar.c
+++ /dev/null
@@ -1,193 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-igemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qs8_igemm_minmax_rndnu_ukernel_4x2__scalar(
- size_t mr,
- size_t nc,
- size_t kc,
- size_t ks,
- const int8_t**restrict a,
- const void*restrict w,
- int8_t*restrict c,
- size_t cm_stride,
- size_t cn_stride,
- size_t a_offset,
- const int8_t* zero,
- const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 4);
- assert(nc != 0);
- assert(kc != 0);
- assert(ks != 0);
- assert(ks % (4 * sizeof(void*)) == 0);
- assert(a != NULL);
- assert(w != NULL);
- assert(c != NULL);
-
- int8_t* c0 = c;
- int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
- if XNN_UNPREDICTABLE(mr < 2) {
- c1 = c0;
- }
- int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
- if XNN_UNPREDICTABLE(mr <= 2) {
- c2 = c1;
- }
- int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
- if XNN_UNPREDICTABLE(mr != 4) {
- c3 = c2;
- }
-
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- int32_t vacc1x0 = vacc0x0;
- int32_t vacc1x1 = vacc0x1;
- int32_t vacc2x0 = vacc0x0;
- int32_t vacc2x1 = vacc0x1;
- int32_t vacc3x0 = vacc0x0;
- int32_t vacc3x1 = vacc0x1;
- w = (const void*) ((const int32_t*) w + 2);
-
- size_t p = ks;
- do {
- const int8_t* restrict a0 = a[0];
- assert(a0 != NULL);
- if XNN_UNPREDICTABLE(a0 != zero) {
- a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
- }
- const int8_t* restrict a1 = a[1];
- assert(a1 != NULL);
- if XNN_UNPREDICTABLE(a1 != zero) {
- a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
- }
- const int8_t* restrict a2 = a[2];
- assert(a2 != NULL);
- if XNN_UNPREDICTABLE(a2 != zero) {
- a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
- }
- const int8_t* restrict a3 = a[3];
- assert(a3 != NULL);
- if XNN_UNPREDICTABLE(a3 != zero) {
- a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
- }
- a += 4;
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) *a0++;
- const int32_t va1 = (int32_t) *a1++;
- const int32_t va2 = (int32_t) *a2++;
- const int32_t va3 = (int32_t) *a3++;
-
- const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
- const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
- w = (const void*) ((const int8_t*) w + 2);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
- vacc1x0 += va1 * vb0;
- vacc1x1 += va1 * vb1;
- vacc2x0 += va2 * vb0;
- vacc2x1 += va2 * vb1;
- vacc3x0 += va3 * vb0;
- vacc3x1 += va3 * vb1;
-
- k -= sizeof(int8_t);
- } while (k != 0);
- p -= 4 * sizeof(void*);
- } while (p != 0);
-
- const int32_t vmultiplier = params->rndnu_scalar.multiplier;
- const int64_t vproduct0x0 = (int64_t) vacc0x0 * (int64_t) vmultiplier;
- const int64_t vproduct0x1 = (int64_t) vacc0x1 * (int64_t) vmultiplier;
- const int64_t vproduct1x0 = (int64_t) vacc1x0 * (int64_t) vmultiplier;
- const int64_t vproduct1x1 = (int64_t) vacc1x1 * (int64_t) vmultiplier;
- const int64_t vproduct2x0 = (int64_t) vacc2x0 * (int64_t) vmultiplier;
- const int64_t vproduct2x1 = (int64_t) vacc2x1 * (int64_t) vmultiplier;
- const int64_t vproduct3x0 = (int64_t) vacc3x0 * (int64_t) vmultiplier;
- const int64_t vproduct3x1 = (int64_t) vacc3x1 * (int64_t) vmultiplier;
-
- const uint32_t shift = params->rndnu_scalar.shift;
- const int64_t rounding = params->rndnu_scalar.rounding;
- int32_t vout0x0 = (int32_t) asr_s64(vproduct0x0 + rounding, shift);
- int32_t vout0x1 = (int32_t) asr_s64(vproduct0x1 + rounding, shift);
- int32_t vout1x0 = (int32_t) asr_s64(vproduct1x0 + rounding, shift);
- int32_t vout1x1 = (int32_t) asr_s64(vproduct1x1 + rounding, shift);
- int32_t vout2x0 = (int32_t) asr_s64(vproduct2x0 + rounding, shift);
- int32_t vout2x1 = (int32_t) asr_s64(vproduct2x1 + rounding, shift);
- int32_t vout3x0 = (int32_t) asr_s64(vproduct3x0 + rounding, shift);
- int32_t vout3x1 = (int32_t) asr_s64(vproduct3x1 + rounding, shift);
-
- const int32_t voutput_min_less_zero_point = params->rndnu_scalar.output_min_less_zero_point;
- vout0x0 = math_max_s32(vout0x0, voutput_min_less_zero_point);
- vout0x1 = math_max_s32(vout0x1, voutput_min_less_zero_point);
- vout1x0 = math_max_s32(vout1x0, voutput_min_less_zero_point);
- vout1x1 = math_max_s32(vout1x1, voutput_min_less_zero_point);
- vout2x0 = math_max_s32(vout2x0, voutput_min_less_zero_point);
- vout2x1 = math_max_s32(vout2x1, voutput_min_less_zero_point);
- vout3x0 = math_max_s32(vout3x0, voutput_min_less_zero_point);
- vout3x1 = math_max_s32(vout3x1, voutput_min_less_zero_point);
-
- const int32_t voutput_max_less_zero_point = params->rndnu_scalar.output_max_less_zero_point;
- vout0x0 = math_min_s32(vout0x0, voutput_max_less_zero_point);
- vout0x1 = math_min_s32(vout0x1, voutput_max_less_zero_point);
- vout1x0 = math_min_s32(vout1x0, voutput_max_less_zero_point);
- vout1x1 = math_min_s32(vout1x1, voutput_max_less_zero_point);
- vout2x0 = math_min_s32(vout2x0, voutput_max_less_zero_point);
- vout2x1 = math_min_s32(vout2x1, voutput_max_less_zero_point);
- vout3x0 = math_min_s32(vout3x0, voutput_max_less_zero_point);
- vout3x1 = math_min_s32(vout3x1, voutput_max_less_zero_point);
-
- const int32_t voutput_zero_point = params->rndnu_scalar.output_zero_point;
- vout0x0 += voutput_zero_point;
- vout0x1 += voutput_zero_point;
- vout1x0 += voutput_zero_point;
- vout1x1 += voutput_zero_point;
- vout2x0 += voutput_zero_point;
- vout2x1 += voutput_zero_point;
- vout3x0 += voutput_zero_point;
- vout3x1 += voutput_zero_point;
-
- if XNN_LIKELY(nc >= 2) {
- c3[0] = (int8_t) vout3x0;
- c3[1] = (int8_t) vout3x1;
- c2[0] = (int8_t) vout2x0;
- c2[1] = (int8_t) vout2x1;
- c1[0] = (int8_t) vout1x0;
- c1[1] = (int8_t) vout1x1;
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
-
- c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
- c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
- c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
- c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
-
- a = (const int8_t**restrict) ((uintptr_t) a - ks);
- nc -= 2;
- } else {
- if (nc & 1) {
- c3[0] = (int8_t) vout3x0;
- c2[0] = (int8_t) vout2x0;
- c1[0] = (int8_t) vout1x0;
- c0[0] = (int8_t) vout0x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qs8-igemm/gen/4x4-minmax-fp32-scalar-magic.c b/src/qs8-igemm/gen/4x4-minmax-fp32-scalar-fmagic.c
similarity index 96%
rename from src/qs8-igemm/gen/4x4-minmax-fp32-scalar-magic.c
rename to src/qs8-igemm/gen/4x4-minmax-fp32-scalar-fmagic.c
index acf4cf0..9e2e18f 100644
--- a/src/qs8-igemm/gen/4x4-minmax-fp32-scalar-magic.c
+++ b/src/qs8-igemm/gen/4x4-minmax-fp32-scalar-fmagic.c
@@ -15,7 +15,7 @@
#include <xnnpack/gemm.h>
-void xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_magic(
+void xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic(
size_t mr,
size_t nc,
size_t kc,
@@ -148,7 +148,7 @@
float vfpacc3x2 = (float) vacc3x2;
float vfpacc3x3 = (float) vacc3x3;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_fmagic.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc0x2 *= vscale;
@@ -166,7 +166,7 @@
vfpacc3x2 *= vscale;
vfpacc3x3 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
@@ -184,7 +184,7 @@
vfpacc3x2 = math_max_f32(vfpacc3x2, voutput_min_less_zero_point);
vfpacc3x3 = math_max_f32(vfpacc3x3, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
@@ -202,7 +202,7 @@
vfpacc3x2 = math_min_f32(vfpacc3x2, voutput_max_less_zero_point);
vfpacc3x3 = math_min_f32(vfpacc3x3, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
+ const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc0x2 += vmagic_bias;
@@ -220,7 +220,7 @@
vfpacc3x2 += vmagic_bias;
vfpacc3x3 += vmagic_bias;
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
+ const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
int32_t vout0x2 = (int32_t) fp32_to_bits(vfpacc0x2) - vmagic_bias_less_output_zero_point;
diff --git a/src/qs8-igemm/gen/4x4-minmax-fp32-scalar-lrint.c b/src/qs8-igemm/gen/4x4-minmax-fp32-scalar-lrint.c
deleted file mode 100644
index 9bc16af..0000000
--- a/src/qs8-igemm/gen/4x4-minmax-fp32-scalar-lrint.c
+++ /dev/null
@@ -1,293 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-igemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-#include <math.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint(
- size_t mr,
- size_t nc,
- size_t kc,
- size_t ks,
- const int8_t**restrict a,
- const void*restrict w,
- int8_t*restrict c,
- size_t cm_stride,
- size_t cn_stride,
- size_t a_offset,
- const int8_t* zero,
- const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 4);
- assert(nc != 0);
- assert(kc != 0);
- assert(ks != 0);
- assert(ks % (4 * sizeof(void*)) == 0);
- assert(a != NULL);
- assert(w != NULL);
- assert(c != NULL);
-
- int8_t* c0 = c;
- int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
- if XNN_UNPREDICTABLE(mr < 2) {
- c1 = c0;
- }
- int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
- if XNN_UNPREDICTABLE(mr <= 2) {
- c2 = c1;
- }
- int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
- if XNN_UNPREDICTABLE(mr != 4) {
- c3 = c2;
- }
-
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- int32_t vacc0x2 = ((const int32_t*) w)[2];
- int32_t vacc0x3 = ((const int32_t*) w)[3];
- int32_t vacc1x0 = vacc0x0;
- int32_t vacc1x1 = vacc0x1;
- int32_t vacc1x2 = vacc0x2;
- int32_t vacc1x3 = vacc0x3;
- int32_t vacc2x0 = vacc0x0;
- int32_t vacc2x1 = vacc0x1;
- int32_t vacc2x2 = vacc0x2;
- int32_t vacc2x3 = vacc0x3;
- int32_t vacc3x0 = vacc0x0;
- int32_t vacc3x1 = vacc0x1;
- int32_t vacc3x2 = vacc0x2;
- int32_t vacc3x3 = vacc0x3;
- w = (const void*) ((const int32_t*) w + 4);
-
- size_t p = ks;
- do {
- const int8_t* restrict a0 = a[0];
- assert(a0 != NULL);
- if XNN_UNPREDICTABLE(a0 != zero) {
- a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
- }
- const int8_t* restrict a1 = a[1];
- assert(a1 != NULL);
- if XNN_UNPREDICTABLE(a1 != zero) {
- a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
- }
- const int8_t* restrict a2 = a[2];
- assert(a2 != NULL);
- if XNN_UNPREDICTABLE(a2 != zero) {
- a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
- }
- const int8_t* restrict a3 = a[3];
- assert(a3 != NULL);
- if XNN_UNPREDICTABLE(a3 != zero) {
- a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
- }
- a += 4;
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) *a0++;
- const int32_t va1 = (int32_t) *a1++;
- const int32_t va2 = (int32_t) *a2++;
- const int32_t va3 = (int32_t) *a3++;
-
- const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
- const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
- const int32_t vb2 = (int32_t) ((const int8_t*) w)[2];
- const int32_t vb3 = (int32_t) ((const int8_t*) w)[3];
- w = (const void*) ((const int8_t*) w + 4);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
- vacc0x2 += va0 * vb2;
- vacc0x3 += va0 * vb3;
- vacc1x0 += va1 * vb0;
- vacc1x1 += va1 * vb1;
- vacc1x2 += va1 * vb2;
- vacc1x3 += va1 * vb3;
- vacc2x0 += va2 * vb0;
- vacc2x1 += va2 * vb1;
- vacc2x2 += va2 * vb2;
- vacc2x3 += va2 * vb3;
- vacc3x0 += va3 * vb0;
- vacc3x1 += va3 * vb1;
- vacc3x2 += va3 * vb2;
- vacc3x3 += va3 * vb3;
-
- k -= sizeof(int8_t);
- } while (k != 0);
- p -= 4 * sizeof(void*);
- } while (p != 0);
-
- float vfpacc0x0 = (float) vacc0x0;
- float vfpacc0x1 = (float) vacc0x1;
- float vfpacc0x2 = (float) vacc0x2;
- float vfpacc0x3 = (float) vacc0x3;
- float vfpacc1x0 = (float) vacc1x0;
- float vfpacc1x1 = (float) vacc1x1;
- float vfpacc1x2 = (float) vacc1x2;
- float vfpacc1x3 = (float) vacc1x3;
- float vfpacc2x0 = (float) vacc2x0;
- float vfpacc2x1 = (float) vacc2x1;
- float vfpacc2x2 = (float) vacc2x2;
- float vfpacc2x3 = (float) vacc2x3;
- float vfpacc3x0 = (float) vacc3x0;
- float vfpacc3x1 = (float) vacc3x1;
- float vfpacc3x2 = (float) vacc3x2;
- float vfpacc3x3 = (float) vacc3x3;
-
- const float vscale = params->fp32_scalar_lrint.scale;
- vfpacc0x0 *= vscale;
- vfpacc0x1 *= vscale;
- vfpacc0x2 *= vscale;
- vfpacc0x3 *= vscale;
- vfpacc1x0 *= vscale;
- vfpacc1x1 *= vscale;
- vfpacc1x2 *= vscale;
- vfpacc1x3 *= vscale;
- vfpacc2x0 *= vscale;
- vfpacc2x1 *= vscale;
- vfpacc2x2 *= vscale;
- vfpacc2x3 *= vscale;
- vfpacc3x0 *= vscale;
- vfpacc3x1 *= vscale;
- vfpacc3x2 *= vscale;
- vfpacc3x3 *= vscale;
-
- long vrndacc0x0 = lrintf(vfpacc0x0);
- long vrndacc0x1 = lrintf(vfpacc0x1);
- long vrndacc0x2 = lrintf(vfpacc0x2);
- long vrndacc0x3 = lrintf(vfpacc0x3);
- long vrndacc1x0 = lrintf(vfpacc1x0);
- long vrndacc1x1 = lrintf(vfpacc1x1);
- long vrndacc1x2 = lrintf(vfpacc1x2);
- long vrndacc1x3 = lrintf(vfpacc1x3);
- long vrndacc2x0 = lrintf(vfpacc2x0);
- long vrndacc2x1 = lrintf(vfpacc2x1);
- long vrndacc2x2 = lrintf(vfpacc2x2);
- long vrndacc2x3 = lrintf(vfpacc2x3);
- long vrndacc3x0 = lrintf(vfpacc3x0);
- long vrndacc3x1 = lrintf(vfpacc3x1);
- long vrndacc3x2 = lrintf(vfpacc3x2);
- long vrndacc3x3 = lrintf(vfpacc3x3);
-
- const long voutput_min_less_zero_point = params->fp32_scalar_lrint.output_min_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x1;
- vrndacc0x2 = XNN_UNPREDICTABLE(vrndacc0x2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x2;
- vrndacc0x3 = XNN_UNPREDICTABLE(vrndacc0x3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x3;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x1;
- vrndacc1x2 = XNN_UNPREDICTABLE(vrndacc1x2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x2;
- vrndacc1x3 = XNN_UNPREDICTABLE(vrndacc1x3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x3;
- vrndacc2x0 = XNN_UNPREDICTABLE(vrndacc2x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x0;
- vrndacc2x1 = XNN_UNPREDICTABLE(vrndacc2x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x1;
- vrndacc2x2 = XNN_UNPREDICTABLE(vrndacc2x2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x2;
- vrndacc2x3 = XNN_UNPREDICTABLE(vrndacc2x3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x3;
- vrndacc3x0 = XNN_UNPREDICTABLE(vrndacc3x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc3x0;
- vrndacc3x1 = XNN_UNPREDICTABLE(vrndacc3x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc3x1;
- vrndacc3x2 = XNN_UNPREDICTABLE(vrndacc3x2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc3x2;
- vrndacc3x3 = XNN_UNPREDICTABLE(vrndacc3x3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc3x3;
-
- const long voutput_max_less_zero_point = params->fp32_scalar_lrint.output_max_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x1;
- vrndacc0x2 = XNN_UNPREDICTABLE(vrndacc0x2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x2;
- vrndacc0x3 = XNN_UNPREDICTABLE(vrndacc0x3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x3;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x1;
- vrndacc1x2 = XNN_UNPREDICTABLE(vrndacc1x2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x2;
- vrndacc1x3 = XNN_UNPREDICTABLE(vrndacc1x3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x3;
- vrndacc2x0 = XNN_UNPREDICTABLE(vrndacc2x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x0;
- vrndacc2x1 = XNN_UNPREDICTABLE(vrndacc2x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x1;
- vrndacc2x2 = XNN_UNPREDICTABLE(vrndacc2x2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x2;
- vrndacc2x3 = XNN_UNPREDICTABLE(vrndacc2x3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x3;
- vrndacc3x0 = XNN_UNPREDICTABLE(vrndacc3x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc3x0;
- vrndacc3x1 = XNN_UNPREDICTABLE(vrndacc3x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc3x1;
- vrndacc3x2 = XNN_UNPREDICTABLE(vrndacc3x2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc3x2;
- vrndacc3x3 = XNN_UNPREDICTABLE(vrndacc3x3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc3x3;
-
- const int32_t voutput_zero_point = params->fp32_scalar_lrint.output_zero_point;
- int32_t vout0x0 = (int32_t) vrndacc0x0 + voutput_zero_point;
- int32_t vout0x1 = (int32_t) vrndacc0x1 + voutput_zero_point;
- int32_t vout0x2 = (int32_t) vrndacc0x2 + voutput_zero_point;
- int32_t vout0x3 = (int32_t) vrndacc0x3 + voutput_zero_point;
- int32_t vout1x0 = (int32_t) vrndacc1x0 + voutput_zero_point;
- int32_t vout1x1 = (int32_t) vrndacc1x1 + voutput_zero_point;
- int32_t vout1x2 = (int32_t) vrndacc1x2 + voutput_zero_point;
- int32_t vout1x3 = (int32_t) vrndacc1x3 + voutput_zero_point;
- int32_t vout2x0 = (int32_t) vrndacc2x0 + voutput_zero_point;
- int32_t vout2x1 = (int32_t) vrndacc2x1 + voutput_zero_point;
- int32_t vout2x2 = (int32_t) vrndacc2x2 + voutput_zero_point;
- int32_t vout2x3 = (int32_t) vrndacc2x3 + voutput_zero_point;
- int32_t vout3x0 = (int32_t) vrndacc3x0 + voutput_zero_point;
- int32_t vout3x1 = (int32_t) vrndacc3x1 + voutput_zero_point;
- int32_t vout3x2 = (int32_t) vrndacc3x2 + voutput_zero_point;
- int32_t vout3x3 = (int32_t) vrndacc3x3 + voutput_zero_point;
-
- if XNN_LIKELY(nc >= 4) {
- c3[0] = (int8_t) vout3x0;
- c3[1] = (int8_t) vout3x1;
- c3[2] = (int8_t) vout3x2;
- c3[3] = (int8_t) vout3x3;
- c2[0] = (int8_t) vout2x0;
- c2[1] = (int8_t) vout2x1;
- c2[2] = (int8_t) vout2x2;
- c2[3] = (int8_t) vout2x3;
- c1[0] = (int8_t) vout1x0;
- c1[1] = (int8_t) vout1x1;
- c1[2] = (int8_t) vout1x2;
- c1[3] = (int8_t) vout1x3;
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
- c0[2] = (int8_t) vout0x2;
- c0[3] = (int8_t) vout0x3;
-
- c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
- c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
- c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
- c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
-
- a = (const int8_t**restrict) ((uintptr_t) a - ks);
- nc -= 4;
- } else {
- if (nc & 2) {
- c3[0] = (int8_t) vout3x0;
- c3[1] = (int8_t) vout3x1;
- vout3x0 = vout3x2;
- c3 += 2;
- c2[0] = (int8_t) vout2x0;
- c2[1] = (int8_t) vout2x1;
- vout2x0 = vout2x2;
- c2 += 2;
- c1[0] = (int8_t) vout1x0;
- c1[1] = (int8_t) vout1x1;
- vout1x0 = vout1x2;
- c1 += 2;
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
- vout0x0 = vout0x2;
- c0 += 2;
- }
- if (nc & 1) {
- c3[0] = (int8_t) vout3x0;
- c2[0] = (int8_t) vout2x0;
- c1[0] = (int8_t) vout1x0;
- c0[0] = (int8_t) vout0x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qs8-igemm/gen/4x4-minmax-fp32-scalar-magic.c b/src/qs8-igemm/gen/4x4-minmax-fp32-scalar-lrintf.c
similarity index 78%
copy from src/qs8-igemm/gen/4x4-minmax-fp32-scalar-magic.c
copy to src/qs8-igemm/gen/4x4-minmax-fp32-scalar-lrintf.c
index acf4cf0..d9eab5e 100644
--- a/src/qs8-igemm/gen/4x4-minmax-fp32-scalar-magic.c
+++ b/src/qs8-igemm/gen/4x4-minmax-fp32-scalar-lrintf.c
@@ -8,14 +8,13 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-
-#include <fp16.h>
+#include <math.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
-void xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_magic(
+void xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
@@ -148,7 +147,7 @@
float vfpacc3x2 = (float) vacc3x2;
float vfpacc3x3 = (float) vacc3x3;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_lrintf.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc0x2 *= vscale;
@@ -166,7 +165,7 @@
vfpacc3x2 *= vscale;
vfpacc3x3 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
@@ -184,7 +183,7 @@
vfpacc3x2 = math_max_f32(vfpacc3x2, voutput_min_less_zero_point);
vfpacc3x3 = math_max_f32(vfpacc3x3, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
@@ -202,41 +201,40 @@
vfpacc3x2 = math_min_f32(vfpacc3x2, voutput_max_less_zero_point);
vfpacc3x3 = math_min_f32(vfpacc3x3, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
- vfpacc0x0 += vmagic_bias;
- vfpacc0x1 += vmagic_bias;
- vfpacc0x2 += vmagic_bias;
- vfpacc0x3 += vmagic_bias;
- vfpacc1x0 += vmagic_bias;
- vfpacc1x1 += vmagic_bias;
- vfpacc1x2 += vmagic_bias;
- vfpacc1x3 += vmagic_bias;
- vfpacc2x0 += vmagic_bias;
- vfpacc2x1 += vmagic_bias;
- vfpacc2x2 += vmagic_bias;
- vfpacc2x3 += vmagic_bias;
- vfpacc3x0 += vmagic_bias;
- vfpacc3x1 += vmagic_bias;
- vfpacc3x2 += vmagic_bias;
- vfpacc3x3 += vmagic_bias;
+ const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
+ const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
+ const int32_t vrndacc0x2 = (int32_t) lrintf(vfpacc0x2);
+ const int32_t vrndacc0x3 = (int32_t) lrintf(vfpacc0x3);
+ const int32_t vrndacc1x0 = (int32_t) lrintf(vfpacc1x0);
+ const int32_t vrndacc1x1 = (int32_t) lrintf(vfpacc1x1);
+ const int32_t vrndacc1x2 = (int32_t) lrintf(vfpacc1x2);
+ const int32_t vrndacc1x3 = (int32_t) lrintf(vfpacc1x3);
+ const int32_t vrndacc2x0 = (int32_t) lrintf(vfpacc2x0);
+ const int32_t vrndacc2x1 = (int32_t) lrintf(vfpacc2x1);
+ const int32_t vrndacc2x2 = (int32_t) lrintf(vfpacc2x2);
+ const int32_t vrndacc2x3 = (int32_t) lrintf(vfpacc2x3);
+ const int32_t vrndacc3x0 = (int32_t) lrintf(vfpacc3x0);
+ const int32_t vrndacc3x1 = (int32_t) lrintf(vfpacc3x1);
+ const int32_t vrndacc3x2 = (int32_t) lrintf(vfpacc3x2);
+ const int32_t vrndacc3x3 = (int32_t) lrintf(vfpacc3x3);
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
- int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
- int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
- int32_t vout0x2 = (int32_t) fp32_to_bits(vfpacc0x2) - vmagic_bias_less_output_zero_point;
- int32_t vout0x3 = (int32_t) fp32_to_bits(vfpacc0x3) - vmagic_bias_less_output_zero_point;
- int32_t vout1x0 = (int32_t) fp32_to_bits(vfpacc1x0) - vmagic_bias_less_output_zero_point;
- int32_t vout1x1 = (int32_t) fp32_to_bits(vfpacc1x1) - vmagic_bias_less_output_zero_point;
- int32_t vout1x2 = (int32_t) fp32_to_bits(vfpacc1x2) - vmagic_bias_less_output_zero_point;
- int32_t vout1x3 = (int32_t) fp32_to_bits(vfpacc1x3) - vmagic_bias_less_output_zero_point;
- int32_t vout2x0 = (int32_t) fp32_to_bits(vfpacc2x0) - vmagic_bias_less_output_zero_point;
- int32_t vout2x1 = (int32_t) fp32_to_bits(vfpacc2x1) - vmagic_bias_less_output_zero_point;
- int32_t vout2x2 = (int32_t) fp32_to_bits(vfpacc2x2) - vmagic_bias_less_output_zero_point;
- int32_t vout2x3 = (int32_t) fp32_to_bits(vfpacc2x3) - vmagic_bias_less_output_zero_point;
- int32_t vout3x0 = (int32_t) fp32_to_bits(vfpacc3x0) - vmagic_bias_less_output_zero_point;
- int32_t vout3x1 = (int32_t) fp32_to_bits(vfpacc3x1) - vmagic_bias_less_output_zero_point;
- int32_t vout3x2 = (int32_t) fp32_to_bits(vfpacc3x2) - vmagic_bias_less_output_zero_point;
- int32_t vout3x3 = (int32_t) fp32_to_bits(vfpacc3x3) - vmagic_bias_less_output_zero_point;
+ const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
+ int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
+ int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
+ int32_t vout0x2 = vrndacc0x2 + voutput_zero_point;
+ int32_t vout0x3 = vrndacc0x3 + voutput_zero_point;
+ int32_t vout1x0 = vrndacc1x0 + voutput_zero_point;
+ int32_t vout1x1 = vrndacc1x1 + voutput_zero_point;
+ int32_t vout1x2 = vrndacc1x2 + voutput_zero_point;
+ int32_t vout1x3 = vrndacc1x3 + voutput_zero_point;
+ int32_t vout2x0 = vrndacc2x0 + voutput_zero_point;
+ int32_t vout2x1 = vrndacc2x1 + voutput_zero_point;
+ int32_t vout2x2 = vrndacc2x2 + voutput_zero_point;
+ int32_t vout2x3 = vrndacc2x3 + voutput_zero_point;
+ int32_t vout3x0 = vrndacc3x0 + voutput_zero_point;
+ int32_t vout3x1 = vrndacc3x1 + voutput_zero_point;
+ int32_t vout3x2 = vrndacc3x2 + voutput_zero_point;
+ int32_t vout3x3 = vrndacc3x3 + voutput_zero_point;
if XNN_LIKELY(nc >= 4) {
c3[0] = (int8_t) vout3x0;
diff --git a/src/qs8-igemm/gen/4x4-minmax-rndnu-scalar.c b/src/qs8-igemm/gen/4x4-minmax-rndnu-scalar.c
deleted file mode 100644
index d66952a..0000000
--- a/src/qs8-igemm/gen/4x4-minmax-rndnu-scalar.c
+++ /dev/null
@@ -1,277 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-igemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qs8_igemm_minmax_rndnu_ukernel_4x4__scalar(
- size_t mr,
- size_t nc,
- size_t kc,
- size_t ks,
- const int8_t**restrict a,
- const void*restrict w,
- int8_t*restrict c,
- size_t cm_stride,
- size_t cn_stride,
- size_t a_offset,
- const int8_t* zero,
- const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 4);
- assert(nc != 0);
- assert(kc != 0);
- assert(ks != 0);
- assert(ks % (4 * sizeof(void*)) == 0);
- assert(a != NULL);
- assert(w != NULL);
- assert(c != NULL);
-
- int8_t* c0 = c;
- int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
- if XNN_UNPREDICTABLE(mr < 2) {
- c1 = c0;
- }
- int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
- if XNN_UNPREDICTABLE(mr <= 2) {
- c2 = c1;
- }
- int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
- if XNN_UNPREDICTABLE(mr != 4) {
- c3 = c2;
- }
-
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- int32_t vacc0x2 = ((const int32_t*) w)[2];
- int32_t vacc0x3 = ((const int32_t*) w)[3];
- int32_t vacc1x0 = vacc0x0;
- int32_t vacc1x1 = vacc0x1;
- int32_t vacc1x2 = vacc0x2;
- int32_t vacc1x3 = vacc0x3;
- int32_t vacc2x0 = vacc0x0;
- int32_t vacc2x1 = vacc0x1;
- int32_t vacc2x2 = vacc0x2;
- int32_t vacc2x3 = vacc0x3;
- int32_t vacc3x0 = vacc0x0;
- int32_t vacc3x1 = vacc0x1;
- int32_t vacc3x2 = vacc0x2;
- int32_t vacc3x3 = vacc0x3;
- w = (const void*) ((const int32_t*) w + 4);
-
- size_t p = ks;
- do {
- const int8_t* restrict a0 = a[0];
- assert(a0 != NULL);
- if XNN_UNPREDICTABLE(a0 != zero) {
- a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
- }
- const int8_t* restrict a1 = a[1];
- assert(a1 != NULL);
- if XNN_UNPREDICTABLE(a1 != zero) {
- a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
- }
- const int8_t* restrict a2 = a[2];
- assert(a2 != NULL);
- if XNN_UNPREDICTABLE(a2 != zero) {
- a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
- }
- const int8_t* restrict a3 = a[3];
- assert(a3 != NULL);
- if XNN_UNPREDICTABLE(a3 != zero) {
- a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
- }
- a += 4;
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) *a0++;
- const int32_t va1 = (int32_t) *a1++;
- const int32_t va2 = (int32_t) *a2++;
- const int32_t va3 = (int32_t) *a3++;
-
- const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
- const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
- const int32_t vb2 = (int32_t) ((const int8_t*) w)[2];
- const int32_t vb3 = (int32_t) ((const int8_t*) w)[3];
- w = (const void*) ((const int8_t*) w + 4);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
- vacc0x2 += va0 * vb2;
- vacc0x3 += va0 * vb3;
- vacc1x0 += va1 * vb0;
- vacc1x1 += va1 * vb1;
- vacc1x2 += va1 * vb2;
- vacc1x3 += va1 * vb3;
- vacc2x0 += va2 * vb0;
- vacc2x1 += va2 * vb1;
- vacc2x2 += va2 * vb2;
- vacc2x3 += va2 * vb3;
- vacc3x0 += va3 * vb0;
- vacc3x1 += va3 * vb1;
- vacc3x2 += va3 * vb2;
- vacc3x3 += va3 * vb3;
-
- k -= sizeof(int8_t);
- } while (k != 0);
- p -= 4 * sizeof(void*);
- } while (p != 0);
-
- const int32_t vmultiplier = params->rndnu_scalar.multiplier;
- const int64_t vproduct0x0 = (int64_t) vacc0x0 * (int64_t) vmultiplier;
- const int64_t vproduct0x1 = (int64_t) vacc0x1 * (int64_t) vmultiplier;
- const int64_t vproduct0x2 = (int64_t) vacc0x2 * (int64_t) vmultiplier;
- const int64_t vproduct0x3 = (int64_t) vacc0x3 * (int64_t) vmultiplier;
- const int64_t vproduct1x0 = (int64_t) vacc1x0 * (int64_t) vmultiplier;
- const int64_t vproduct1x1 = (int64_t) vacc1x1 * (int64_t) vmultiplier;
- const int64_t vproduct1x2 = (int64_t) vacc1x2 * (int64_t) vmultiplier;
- const int64_t vproduct1x3 = (int64_t) vacc1x3 * (int64_t) vmultiplier;
- const int64_t vproduct2x0 = (int64_t) vacc2x0 * (int64_t) vmultiplier;
- const int64_t vproduct2x1 = (int64_t) vacc2x1 * (int64_t) vmultiplier;
- const int64_t vproduct2x2 = (int64_t) vacc2x2 * (int64_t) vmultiplier;
- const int64_t vproduct2x3 = (int64_t) vacc2x3 * (int64_t) vmultiplier;
- const int64_t vproduct3x0 = (int64_t) vacc3x0 * (int64_t) vmultiplier;
- const int64_t vproduct3x1 = (int64_t) vacc3x1 * (int64_t) vmultiplier;
- const int64_t vproduct3x2 = (int64_t) vacc3x2 * (int64_t) vmultiplier;
- const int64_t vproduct3x3 = (int64_t) vacc3x3 * (int64_t) vmultiplier;
-
- const uint32_t shift = params->rndnu_scalar.shift;
- const int64_t rounding = params->rndnu_scalar.rounding;
- int32_t vout0x0 = (int32_t) asr_s64(vproduct0x0 + rounding, shift);
- int32_t vout0x1 = (int32_t) asr_s64(vproduct0x1 + rounding, shift);
- int32_t vout0x2 = (int32_t) asr_s64(vproduct0x2 + rounding, shift);
- int32_t vout0x3 = (int32_t) asr_s64(vproduct0x3 + rounding, shift);
- int32_t vout1x0 = (int32_t) asr_s64(vproduct1x0 + rounding, shift);
- int32_t vout1x1 = (int32_t) asr_s64(vproduct1x1 + rounding, shift);
- int32_t vout1x2 = (int32_t) asr_s64(vproduct1x2 + rounding, shift);
- int32_t vout1x3 = (int32_t) asr_s64(vproduct1x3 + rounding, shift);
- int32_t vout2x0 = (int32_t) asr_s64(vproduct2x0 + rounding, shift);
- int32_t vout2x1 = (int32_t) asr_s64(vproduct2x1 + rounding, shift);
- int32_t vout2x2 = (int32_t) asr_s64(vproduct2x2 + rounding, shift);
- int32_t vout2x3 = (int32_t) asr_s64(vproduct2x3 + rounding, shift);
- int32_t vout3x0 = (int32_t) asr_s64(vproduct3x0 + rounding, shift);
- int32_t vout3x1 = (int32_t) asr_s64(vproduct3x1 + rounding, shift);
- int32_t vout3x2 = (int32_t) asr_s64(vproduct3x2 + rounding, shift);
- int32_t vout3x3 = (int32_t) asr_s64(vproduct3x3 + rounding, shift);
-
- const int32_t voutput_min_less_zero_point = params->rndnu_scalar.output_min_less_zero_point;
- vout0x0 = math_max_s32(vout0x0, voutput_min_less_zero_point);
- vout0x1 = math_max_s32(vout0x1, voutput_min_less_zero_point);
- vout0x2 = math_max_s32(vout0x2, voutput_min_less_zero_point);
- vout0x3 = math_max_s32(vout0x3, voutput_min_less_zero_point);
- vout1x0 = math_max_s32(vout1x0, voutput_min_less_zero_point);
- vout1x1 = math_max_s32(vout1x1, voutput_min_less_zero_point);
- vout1x2 = math_max_s32(vout1x2, voutput_min_less_zero_point);
- vout1x3 = math_max_s32(vout1x3, voutput_min_less_zero_point);
- vout2x0 = math_max_s32(vout2x0, voutput_min_less_zero_point);
- vout2x1 = math_max_s32(vout2x1, voutput_min_less_zero_point);
- vout2x2 = math_max_s32(vout2x2, voutput_min_less_zero_point);
- vout2x3 = math_max_s32(vout2x3, voutput_min_less_zero_point);
- vout3x0 = math_max_s32(vout3x0, voutput_min_less_zero_point);
- vout3x1 = math_max_s32(vout3x1, voutput_min_less_zero_point);
- vout3x2 = math_max_s32(vout3x2, voutput_min_less_zero_point);
- vout3x3 = math_max_s32(vout3x3, voutput_min_less_zero_point);
-
- const int32_t voutput_max_less_zero_point = params->rndnu_scalar.output_max_less_zero_point;
- vout0x0 = math_min_s32(vout0x0, voutput_max_less_zero_point);
- vout0x1 = math_min_s32(vout0x1, voutput_max_less_zero_point);
- vout0x2 = math_min_s32(vout0x2, voutput_max_less_zero_point);
- vout0x3 = math_min_s32(vout0x3, voutput_max_less_zero_point);
- vout1x0 = math_min_s32(vout1x0, voutput_max_less_zero_point);
- vout1x1 = math_min_s32(vout1x1, voutput_max_less_zero_point);
- vout1x2 = math_min_s32(vout1x2, voutput_max_less_zero_point);
- vout1x3 = math_min_s32(vout1x3, voutput_max_less_zero_point);
- vout2x0 = math_min_s32(vout2x0, voutput_max_less_zero_point);
- vout2x1 = math_min_s32(vout2x1, voutput_max_less_zero_point);
- vout2x2 = math_min_s32(vout2x2, voutput_max_less_zero_point);
- vout2x3 = math_min_s32(vout2x3, voutput_max_less_zero_point);
- vout3x0 = math_min_s32(vout3x0, voutput_max_less_zero_point);
- vout3x1 = math_min_s32(vout3x1, voutput_max_less_zero_point);
- vout3x2 = math_min_s32(vout3x2, voutput_max_less_zero_point);
- vout3x3 = math_min_s32(vout3x3, voutput_max_less_zero_point);
-
- const int32_t voutput_zero_point = params->rndnu_scalar.output_zero_point;
- vout0x0 += voutput_zero_point;
- vout0x1 += voutput_zero_point;
- vout0x2 += voutput_zero_point;
- vout0x3 += voutput_zero_point;
- vout1x0 += voutput_zero_point;
- vout1x1 += voutput_zero_point;
- vout1x2 += voutput_zero_point;
- vout1x3 += voutput_zero_point;
- vout2x0 += voutput_zero_point;
- vout2x1 += voutput_zero_point;
- vout2x2 += voutput_zero_point;
- vout2x3 += voutput_zero_point;
- vout3x0 += voutput_zero_point;
- vout3x1 += voutput_zero_point;
- vout3x2 += voutput_zero_point;
- vout3x3 += voutput_zero_point;
-
- if XNN_LIKELY(nc >= 4) {
- c3[0] = (int8_t) vout3x0;
- c3[1] = (int8_t) vout3x1;
- c3[2] = (int8_t) vout3x2;
- c3[3] = (int8_t) vout3x3;
- c2[0] = (int8_t) vout2x0;
- c2[1] = (int8_t) vout2x1;
- c2[2] = (int8_t) vout2x2;
- c2[3] = (int8_t) vout2x3;
- c1[0] = (int8_t) vout1x0;
- c1[1] = (int8_t) vout1x1;
- c1[2] = (int8_t) vout1x2;
- c1[3] = (int8_t) vout1x3;
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
- c0[2] = (int8_t) vout0x2;
- c0[3] = (int8_t) vout0x3;
-
- c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
- c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
- c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
- c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
-
- a = (const int8_t**restrict) ((uintptr_t) a - ks);
- nc -= 4;
- } else {
- if (nc & 2) {
- c3[0] = (int8_t) vout3x0;
- c3[1] = (int8_t) vout3x1;
- vout3x0 = vout3x2;
- c3 += 2;
- c2[0] = (int8_t) vout2x0;
- c2[1] = (int8_t) vout2x1;
- vout2x0 = vout2x2;
- c2 += 2;
- c1[0] = (int8_t) vout1x0;
- c1[1] = (int8_t) vout1x1;
- vout1x0 = vout1x2;
- c1 += 2;
- c0[0] = (int8_t) vout0x0;
- c0[1] = (int8_t) vout0x1;
- vout0x0 = vout0x2;
- c0 += 2;
- }
- if (nc & 1) {
- c3[0] = (int8_t) vout3x0;
- c2[0] = (int8_t) vout2x0;
- c1[0] = (int8_t) vout1x0;
- c0[0] = (int8_t) vout0x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qs8-igemm/scalar.c.in b/src/qs8-igemm/scalar.c.in
index 2d27ad1..96bf517 100644
--- a/src/qs8-igemm/scalar.c.in
+++ b/src/qs8-igemm/scalar.c.in
@@ -3,16 +3,15 @@
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
-$assert REQUANTIZATION in ["FP32", "RNDNU"]
+$assert REQUANTIZATION == "FP32"
+$assert VARIANT in ["FMAGIC", "LRINTF"]
$assert DATATYPE in ["QC8", "QS8", "QU8"]
-$assert DATATYPE != "QC8" or REQUANTIZATION == "FP32"
#include <assert.h>
-$if REQUANTIZATION == "FP32":
- $if VARIANT == "LRINT":
- #include <math.h>
- $elif VARIANT == "MAGIC":
+$if VARIANT == "LRINTF":
+ #include <math.h>
+$elif VARIANT == "FMAGIC":
- #include <fp16.h>
+ #include <fp16.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
@@ -106,95 +105,68 @@
p -= ${MR} * sizeof(void*);
} while (p != 0);
- $if REQUANTIZATION == "RNDNU":
- const int32_t vmultiplier = params->${PARAMS_STRUCT}.multiplier;
- $for M in range(MR):
- $for N in range(NR):
- const int64_t vproduct${M}x${N} = (int64_t) vacc${M}x${N} * (int64_t) vmultiplier;
+ $for M in range(MR):
+ $for N in range(NR):
+ float vfpacc${M}x${N} = (float) vacc${M}x${N};
- const uint32_t shift = params->${PARAMS_STRUCT}.shift;
- const int64_t rounding = params->${PARAMS_STRUCT}.rounding;
+ $if DATATYPE == "QC8":
+ $if NR % 4 != 0:
+ typedef XNN_UNALIGNED float unaligned_float;
+ $for N in range(NR):
+ const float vscale${N} = ((const unaligned_float*) w)[${N}];
+ $for M in range(MR):
+ vfpacc${M}x${N} *= vscale${N};
+ $else:
+ $for N in range(NR):
+ const float vscale${N} = ((const float*) w)[${N}];
+ $for M in range(MR):
+ vfpacc${M}x${N} *= vscale${N};
+ w = (const void*) ((const float*) w + ${NR});
+ $else:
+ const float vscale = params->${PARAMS_STRUCT}.scale;
$for M in range(MR):
$for N in range(NR):
- int32_t vout${M}x${N} = (int32_t) asr_s64(vproduct${M}x${N} + rounding, shift);
+ vfpacc${M}x${N} *= vscale;
- const int32_t voutput_min_less_zero_point = params->${PARAMS_STRUCT}.output_min_less_zero_point;
+ $if VARIANT == "FMAGIC":
+ const float voutput_min_less_zero_point = params->${PARAMS_STRUCT}.output_min_less_zero_point;
$for M in range(MR):
$for N in range(NR):
- vout${M}x${N} = math_max_s32(vout${M}x${N}, voutput_min_less_zero_point);
+ vfpacc${M}x${N} = math_max_f32(vfpacc${M}x${N}, voutput_min_less_zero_point);
- const int32_t voutput_max_less_zero_point = params->${PARAMS_STRUCT}.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->${PARAMS_STRUCT}.output_max_less_zero_point;
$for M in range(MR):
$for N in range(NR):
- vout${M}x${N} = math_min_s32(vout${M}x${N}, voutput_max_less_zero_point);
+ vfpacc${M}x${N} = math_min_f32(vfpacc${M}x${N}, voutput_max_less_zero_point);
+
+ const float vmagic_bias = params->${PARAMS_STRUCT}.magic_bias;
+ $for M in range(MR):
+ $for N in range(NR):
+ vfpacc${M}x${N} += vmagic_bias;
+
+ const int32_t vmagic_bias_less_output_zero_point = params->${PARAMS_STRUCT}.magic_bias_less_output_zero_point;
+ $for M in range(MR):
+ $for N in range(NR):
+ int32_t vout${M}x${N} = (int32_t) fp32_to_bits(vfpacc${M}x${N}) - vmagic_bias_less_output_zero_point;
+ $elif VARIANT == "LRINTF":
+ const float voutput_min_less_zero_point = params->${PARAMS_STRUCT}.output_min_less_zero_point;
+ $for M in range(MR):
+ $for N in range(NR):
+ vfpacc${M}x${N} = math_max_f32(vfpacc${M}x${N}, voutput_min_less_zero_point);
+
+ const float voutput_max_less_zero_point = params->${PARAMS_STRUCT}.output_max_less_zero_point;
+ $for M in range(MR):
+ $for N in range(NR):
+ vfpacc${M}x${N} = math_min_f32(vfpacc${M}x${N}, voutput_max_less_zero_point);
+
+ $for M in range(MR):
+ $for N in range(NR):
+ const int32_t vrndacc${M}x${N} = (int32_t) lrintf(vfpacc${M}x${N});
const int32_t voutput_zero_point = params->${PARAMS_STRUCT}.output_zero_point;
$for M in range(MR):
$for N in range(NR):
- vout${M}x${N} += voutput_zero_point;
- $elif REQUANTIZATION == "FP32":
- $for M in range(MR):
- $for N in range(NR):
- float vfpacc${M}x${N} = (float) vacc${M}x${N};
-
- $if DATATYPE == "QC8":
- $if NR % 4 != 0:
- typedef XNN_UNALIGNED float unaligned_float;
- $for N in range(NR):
- const float vscale${N} = ((const unaligned_float*) w)[${N}];
- $for M in range(MR):
- vfpacc${M}x${N} *= vscale${N};
- $else:
- $for N in range(NR):
- const float vscale${N} = ((const float*) w)[${N}];
- $for M in range(MR):
- vfpacc${M}x${N} *= vscale${N};
- w = (const void*) ((const float*) w + ${NR});
- $else:
- const float vscale = params->${PARAMS_STRUCT}.scale;
- $for M in range(MR):
- $for N in range(NR):
- vfpacc${M}x${N} *= vscale;
-
- $if VARIANT == "MAGIC":
- const float voutput_min_less_zero_point = params->${PARAMS_STRUCT}.output_min_less_zero_point;
- $for M in range(MR):
- $for N in range(NR):
- vfpacc${M}x${N} = math_max_f32(vfpacc${M}x${N}, voutput_min_less_zero_point);
-
- const float voutput_max_less_zero_point = params->${PARAMS_STRUCT}.output_max_less_zero_point;
- $for M in range(MR):
- $for N in range(NR):
- vfpacc${M}x${N} = math_min_f32(vfpacc${M}x${N}, voutput_max_less_zero_point);
-
- const float vmagic_bias = params->${PARAMS_STRUCT}.magic_bias;
- $for M in range(MR):
- $for N in range(NR):
- vfpacc${M}x${N} += vmagic_bias;
-
- const int32_t vmagic_bias_less_output_zero_point = params->${PARAMS_STRUCT}.magic_bias_less_output_zero_point;
- $for M in range(MR):
- $for N in range(NR):
- int32_t vout${M}x${N} = (int32_t) fp32_to_bits(vfpacc${M}x${N}) - vmagic_bias_less_output_zero_point;
- $elif VARIANT == "LRINT":
- $for M in range(MR):
- $for N in range(NR):
- long vrndacc${M}x${N} = lrintf(vfpacc${M}x${N});
-
- const long voutput_min_less_zero_point = params->${PARAMS_STRUCT}.output_min_less_zero_point;
- $for M in range(MR):
- $for N in range(NR):
- vrndacc${M}x${N} = XNN_UNPREDICTABLE(vrndacc${M}x${N} < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc${M}x${N};
-
- const long voutput_max_less_zero_point = params->${PARAMS_STRUCT}.output_max_less_zero_point;
- $for M in range(MR):
- $for N in range(NR):
- vrndacc${M}x${N} = XNN_UNPREDICTABLE(vrndacc${M}x${N} > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc${M}x${N};
-
- const int32_t voutput_zero_point = params->${PARAMS_STRUCT}.output_zero_point;
- $for M in range(MR):
- $for N in range(NR):
- int32_t vout${M}x${N} = (int32_t) vrndacc${M}x${N} + voutput_zero_point;
+ int32_t vout${M}x${N} = vrndacc${M}x${N} + voutput_zero_point;
if XNN_LIKELY(nc >= ${NR}) {
$for M in reversed(range(MR)):
diff --git a/src/qs8-requantization/fp32-scalar-magic.c b/src/qs8-requantization/fp32-scalar-fmagic.c
similarity index 97%
rename from src/qs8-requantization/fp32-scalar-magic.c
rename to src/qs8-requantization/fp32-scalar-fmagic.c
index 4b9ec89..7fb68fc 100644
--- a/src/qs8-requantization/fp32-scalar-magic.c
+++ b/src/qs8-requantization/fp32-scalar-fmagic.c
@@ -17,7 +17,7 @@
#include <xnnpack/requantization-stubs.h>
-void xnn_qs8_requantize_fp32__scalar_magic(
+void xnn_qs8_requantize_fp32__scalar_fmagic(
size_t n,
const int32_t* input,
float scale,
diff --git a/src/qs8-requantization/fp32-scalar-lrintf.c b/src/qs8-requantization/fp32-scalar-lrintf.c
index 29937fe..0817498 100644
--- a/src/qs8-requantization/fp32-scalar-lrintf.c
+++ b/src/qs8-requantization/fp32-scalar-lrintf.c
@@ -13,6 +13,7 @@
#include <fp16/bitcasts.h>
+#include <xnnpack/math.h>
#include <xnnpack/requantization-stubs.h>
@@ -29,8 +30,8 @@
assert(scale < 1.0f);
assert(scale >= 0x1.0p-32f);
- const long lmin = (long) ((int32_t) qmin - (int32_t) zero_point);
- const long lmax = (long) ((int32_t) qmax - (int32_t) zero_point);
+ const float fmin = (float) ((int32_t) qmin - (int32_t) zero_point);
+ const float fmax = (float) ((int32_t) qmax - (int32_t) zero_point);
for (; n != 0; n -= 4) {
const int32_t x = input[0];
const int32_t y = input[1];
@@ -43,20 +44,20 @@
const float z_scaled = (float) z * scale;
const float w_scaled = (float) w * scale;
- const long x_rounded = lrintf(x_scaled);
- const long y_rounded = lrintf(y_scaled);
- const long z_rounded = lrintf(z_scaled);
- const long w_rounded = lrintf(w_scaled);
+ const float x_clamped = math_min_f32(math_max_f32(x_scaled, fmin), fmax);
+ const float y_clamped = math_min_f32(math_max_f32(y_scaled, fmin), fmax);
+ const float z_clamped = math_min_f32(math_max_f32(z_scaled, fmin), fmax);
+ const float w_clamped = math_min_f32(math_max_f32(w_scaled, fmin), fmax);
- const int32_t x_clamped = (int32_t) (x_rounded < lmin ? lmin : x_rounded > lmax ? lmax : x_rounded);
- const int32_t y_clamped = (int32_t) (y_rounded < lmin ? lmin : y_rounded > lmax ? lmax : y_rounded);
- const int32_t z_clamped = (int32_t) (z_rounded < lmin ? lmin : z_rounded > lmax ? lmax : z_rounded);
- const int32_t w_clamped = (int32_t) (w_rounded < lmin ? lmin : w_rounded > lmax ? lmax : w_rounded);
+ const int32_t x_rounded = (int32_t) lrintf(x_clamped);
+ const int32_t y_rounded = (int32_t) lrintf(y_clamped);
+ const int32_t z_rounded = (int32_t) lrintf(z_clamped);
+ const int32_t w_rounded = (int32_t) lrintf(w_clamped);
- const int32_t x_biased = x_clamped + (int32_t) zero_point;
- const int32_t y_biased = y_clamped + (int32_t) zero_point;
- const int32_t z_biased = z_clamped + (int32_t) zero_point;
- const int32_t w_biased = w_clamped + (int32_t) zero_point;
+ const int32_t x_biased = x_rounded + (int32_t) zero_point;
+ const int32_t y_biased = y_rounded + (int32_t) zero_point;
+ const int32_t z_biased = z_rounded + (int32_t) zero_point;
+ const int32_t w_biased = w_rounded + (int32_t) zero_point;
output[0] = (int8_t) x_biased;
output[1] = (int8_t) y_biased;
diff --git a/src/qu8-dwconv/gen/up1x25-minmax-fp32-scalar-lrint.c b/src/qu8-dwconv/gen/up1x25-minmax-fp32-scalar-fmagic.c
similarity index 91%
copy from src/qu8-dwconv/gen/up1x25-minmax-fp32-scalar-lrint.c
copy to src/qu8-dwconv/gen/up1x25-minmax-fp32-scalar-fmagic.c
index 9a3cf3c..536fe00 100644
--- a/src/qu8-dwconv/gen/up1x25-minmax-fp32-scalar-lrint.c
+++ b/src/qu8-dwconv/gen/up1x25-minmax-fp32-scalar-fmagic.c
@@ -8,13 +8,14 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-#include <math.h>
+
+#include <fp16.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
-void xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrint(
+void xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_fmagic(
size_t channels,
size_t output_width,
const uint8_t** input,
@@ -29,11 +30,12 @@
assert(channels != 0);
assert(output_width != 0);
- const float vscale = params->fp32_scalar_lrint.scale;
- const long voutput_min_less_zero_point = params->fp32_scalar_lrint.output_min_less_zero_point;
- const long voutput_max_less_zero_point = params->fp32_scalar_lrint.output_max_less_zero_point;
- const int32_t voutput_zero_point = params->fp32_scalar_lrint.output_zero_point;
- const int32_t vkernel_zero_point = params->fp32_scalar_lrint.kernel_zero_point;
+ const float vscale = params->fp32_scalar_fmagic.scale;
+ const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
+ const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
+ const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
+ const int32_t vkernel_zero_point = params->fp32_scalar_fmagic.kernel_zero_point;
do {
const uint8_t* i0 = input[0];
assert(i0 != NULL);
@@ -245,11 +247,11 @@
w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 25 * sizeof(uint8_t));
- const float vfpacc = (float) vacc * vscale;
- long vrndacc = lrintf(vfpacc);
- vrndacc = XNN_UNPREDICTABLE(vrndacc < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc;
- vrndacc = XNN_UNPREDICTABLE(vrndacc > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc;
- int32_t vout = (int32_t) vrndacc + voutput_zero_point;
+ float vfpacc = (float) vacc * vscale;
+ vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
+ vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
+ vfpacc += vmagic_bias;
+ int32_t vout = (int32_t) fp32_to_bits(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (uint8_t) vout;
} while (--c != 0);
diff --git a/src/qu8-dwconv/gen/up1x25-minmax-fp32-scalar-lrint.c b/src/qu8-dwconv/gen/up1x25-minmax-fp32-scalar-lrintf.c
similarity index 95%
rename from src/qu8-dwconv/gen/up1x25-minmax-fp32-scalar-lrint.c
rename to src/qu8-dwconv/gen/up1x25-minmax-fp32-scalar-lrintf.c
index 9a3cf3c..3ea5639 100644
--- a/src/qu8-dwconv/gen/up1x25-minmax-fp32-scalar-lrint.c
+++ b/src/qu8-dwconv/gen/up1x25-minmax-fp32-scalar-lrintf.c
@@ -14,7 +14,7 @@
#include <xnnpack/math.h>
-void xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrint(
+void xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrintf(
size_t channels,
size_t output_width,
const uint8_t** input,
@@ -29,11 +29,11 @@
assert(channels != 0);
assert(output_width != 0);
- const float vscale = params->fp32_scalar_lrint.scale;
- const long voutput_min_less_zero_point = params->fp32_scalar_lrint.output_min_less_zero_point;
- const long voutput_max_less_zero_point = params->fp32_scalar_lrint.output_max_less_zero_point;
- const int32_t voutput_zero_point = params->fp32_scalar_lrint.output_zero_point;
- const int32_t vkernel_zero_point = params->fp32_scalar_lrint.kernel_zero_point;
+ const float vscale = params->fp32_scalar_lrintf.scale;
+ const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
+ const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
+ const int32_t vkernel_zero_point = params->fp32_scalar_lrintf.kernel_zero_point;
do {
const uint8_t* i0 = input[0];
assert(i0 != NULL);
diff --git a/src/qu8-dwconv/gen/up1x9-minmax-fp32-scalar-lrint.c b/src/qu8-dwconv/gen/up1x9-minmax-fp32-scalar-fmagic.c
similarity index 82%
copy from src/qu8-dwconv/gen/up1x9-minmax-fp32-scalar-lrint.c
copy to src/qu8-dwconv/gen/up1x9-minmax-fp32-scalar-fmagic.c
index 31708fd..686bfa9 100644
--- a/src/qu8-dwconv/gen/up1x9-minmax-fp32-scalar-lrint.c
+++ b/src/qu8-dwconv/gen/up1x9-minmax-fp32-scalar-fmagic.c
@@ -8,13 +8,14 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-#include <math.h>
+
+#include <fp16.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
-void xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrint(
+void xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_fmagic(
size_t channels,
size_t output_width,
const uint8_t** input,
@@ -29,11 +30,12 @@
assert(channels != 0);
assert(output_width != 0);
- const float vscale = params->fp32_scalar_lrint.scale;
- const long voutput_min_less_zero_point = params->fp32_scalar_lrint.output_min_less_zero_point;
- const long voutput_max_less_zero_point = params->fp32_scalar_lrint.output_max_less_zero_point;
- const int32_t voutput_zero_point = params->fp32_scalar_lrint.output_zero_point;
- const int32_t vkernel_zero_point = params->fp32_scalar_lrint.kernel_zero_point;
+ const float vscale = params->fp32_scalar_fmagic.scale;
+ const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
+ const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
+ const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
+ const int32_t vkernel_zero_point = params->fp32_scalar_fmagic.kernel_zero_point;
do {
const uint8_t* i0 = input[0];
assert(i0 != NULL);
@@ -117,11 +119,11 @@
w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 9 * sizeof(uint8_t));
- const float vfpacc = (float) vacc * vscale;
- long vrndacc = lrintf(vfpacc);
- vrndacc = XNN_UNPREDICTABLE(vrndacc < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc;
- vrndacc = XNN_UNPREDICTABLE(vrndacc > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc;
- int32_t vout = (int32_t) vrndacc + voutput_zero_point;
+ float vfpacc = (float) vacc * vscale;
+ vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
+ vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
+ vfpacc += vmagic_bias;
+ int32_t vout = (int32_t) fp32_to_bits(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (uint8_t) vout;
} while (--c != 0);
diff --git a/src/qu8-dwconv/gen/up1x9-minmax-fp32-scalar-lrint.c b/src/qu8-dwconv/gen/up1x9-minmax-fp32-scalar-lrintf.c
similarity index 90%
rename from src/qu8-dwconv/gen/up1x9-minmax-fp32-scalar-lrint.c
rename to src/qu8-dwconv/gen/up1x9-minmax-fp32-scalar-lrintf.c
index 31708fd..c1a2d37 100644
--- a/src/qu8-dwconv/gen/up1x9-minmax-fp32-scalar-lrint.c
+++ b/src/qu8-dwconv/gen/up1x9-minmax-fp32-scalar-lrintf.c
@@ -14,7 +14,7 @@
#include <xnnpack/math.h>
-void xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrint(
+void xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrintf(
size_t channels,
size_t output_width,
const uint8_t** input,
@@ -29,11 +29,11 @@
assert(channels != 0);
assert(output_width != 0);
- const float vscale = params->fp32_scalar_lrint.scale;
- const long voutput_min_less_zero_point = params->fp32_scalar_lrint.output_min_less_zero_point;
- const long voutput_max_less_zero_point = params->fp32_scalar_lrint.output_max_less_zero_point;
- const int32_t voutput_zero_point = params->fp32_scalar_lrint.output_zero_point;
- const int32_t vkernel_zero_point = params->fp32_scalar_lrint.kernel_zero_point;
+ const float vscale = params->fp32_scalar_lrintf.scale;
+ const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
+ const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
+ const int32_t vkernel_zero_point = params->fp32_scalar_lrintf.kernel_zero_point;
do {
const uint8_t* i0 = input[0];
assert(i0 != NULL);
diff --git a/src/qu8-dwconv/gen/up2x25-minmax-fp32-scalar-lrint.c b/src/qu8-dwconv/gen/up2x25-minmax-fp32-scalar-fmagic.c
similarity index 93%
copy from src/qu8-dwconv/gen/up2x25-minmax-fp32-scalar-lrint.c
copy to src/qu8-dwconv/gen/up2x25-minmax-fp32-scalar-fmagic.c
index 7159c28..13af840 100644
--- a/src/qu8-dwconv/gen/up2x25-minmax-fp32-scalar-lrint.c
+++ b/src/qu8-dwconv/gen/up2x25-minmax-fp32-scalar-fmagic.c
@@ -8,13 +8,14 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-#include <math.h>
+
+#include <fp16.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
-void xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrint(
+void xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_fmagic(
size_t channels,
size_t output_width,
const uint8_t** input,
@@ -29,11 +30,12 @@
assert(channels != 0);
assert(output_width != 0);
- const float vscale = params->fp32_scalar_lrint.scale;
- const long voutput_min_less_zero_point = params->fp32_scalar_lrint.output_min_less_zero_point;
- const long voutput_max_less_zero_point = params->fp32_scalar_lrint.output_max_less_zero_point;
- const int32_t voutput_zero_point = params->fp32_scalar_lrint.output_zero_point;
- const int32_t vkernel_zero_point = params->fp32_scalar_lrint.kernel_zero_point;
+ const float vscale = params->fp32_scalar_fmagic.scale;
+ const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
+ const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
+ const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
+ const int32_t vkernel_zero_point = params->fp32_scalar_fmagic.kernel_zero_point;
do {
const uint8_t* i0 = input[0];
assert(i0 != NULL);
@@ -427,17 +429,17 @@
vfpacc0 *= vscale;
vfpacc1 *= vscale;
- long vrndacc0 = lrintf(vfpacc0);
- long vrndacc1 = lrintf(vfpacc1);
+ vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
+ vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
- vrndacc0 = XNN_UNPREDICTABLE(vrndacc0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0;
- vrndacc1 = XNN_UNPREDICTABLE(vrndacc1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1;
+ vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
+ vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
- vrndacc0 = XNN_UNPREDICTABLE(vrndacc0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0;
- vrndacc1 = XNN_UNPREDICTABLE(vrndacc1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1;
+ vfpacc0 += vmagic_bias;
+ vfpacc1 += vmagic_bias;
- int32_t vout0 = (int32_t) vrndacc0 + voutput_zero_point;
- int32_t vout1 = (int32_t) vrndacc1 + voutput_zero_point;
+ int32_t vout0 = (int32_t) fp32_to_bits(vfpacc0) - vmagic_bias_less_output_zero_point;
+ int32_t vout1 = (int32_t) fp32_to_bits(vfpacc1) - vmagic_bias_less_output_zero_point;
output[0] = (uint8_t) vout0;
output[1] = (uint8_t) vout1;
@@ -522,11 +524,11 @@
const int32_t vk24 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[48] - vkernel_zero_point;
vacc += vi24 * vk24;
- const float vfpacc = (float) vacc * vscale;
- long vrndacc = lrintf(vfpacc);
- vrndacc = XNN_UNPREDICTABLE(vrndacc < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc;
- vrndacc = XNN_UNPREDICTABLE(vrndacc > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc;
- int32_t vout = (int32_t) vrndacc + voutput_zero_point;
+ float vfpacc = (float) vacc * vscale;
+ vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
+ vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
+ vfpacc += vmagic_bias;
+ int32_t vout = (int32_t) fp32_to_bits(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (uint8_t) vout;
}
diff --git a/src/qu8-dwconv/gen/up2x25-minmax-fp32-scalar-lrint.c b/src/qu8-dwconv/gen/up2x25-minmax-fp32-scalar-lrintf.c
similarity index 93%
rename from src/qu8-dwconv/gen/up2x25-minmax-fp32-scalar-lrint.c
rename to src/qu8-dwconv/gen/up2x25-minmax-fp32-scalar-lrintf.c
index 7159c28..fdada3c 100644
--- a/src/qu8-dwconv/gen/up2x25-minmax-fp32-scalar-lrint.c
+++ b/src/qu8-dwconv/gen/up2x25-minmax-fp32-scalar-lrintf.c
@@ -14,7 +14,7 @@
#include <xnnpack/math.h>
-void xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrint(
+void xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrintf(
size_t channels,
size_t output_width,
const uint8_t** input,
@@ -29,11 +29,11 @@
assert(channels != 0);
assert(output_width != 0);
- const float vscale = params->fp32_scalar_lrint.scale;
- const long voutput_min_less_zero_point = params->fp32_scalar_lrint.output_min_less_zero_point;
- const long voutput_max_less_zero_point = params->fp32_scalar_lrint.output_max_less_zero_point;
- const int32_t voutput_zero_point = params->fp32_scalar_lrint.output_zero_point;
- const int32_t vkernel_zero_point = params->fp32_scalar_lrint.kernel_zero_point;
+ const float vscale = params->fp32_scalar_lrintf.scale;
+ const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
+ const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
+ const int32_t vkernel_zero_point = params->fp32_scalar_lrintf.kernel_zero_point;
do {
const uint8_t* i0 = input[0];
assert(i0 != NULL);
@@ -427,14 +427,14 @@
vfpacc0 *= vscale;
vfpacc1 *= vscale;
- long vrndacc0 = lrintf(vfpacc0);
- long vrndacc1 = lrintf(vfpacc1);
+ vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
+ vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
- vrndacc0 = XNN_UNPREDICTABLE(vrndacc0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0;
- vrndacc1 = XNN_UNPREDICTABLE(vrndacc1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1;
+ vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
+ vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
- vrndacc0 = XNN_UNPREDICTABLE(vrndacc0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0;
- vrndacc1 = XNN_UNPREDICTABLE(vrndacc1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1;
+ const int32_t vrndacc0 = (int32_t) lrintf(vfpacc0);
+ const int32_t vrndacc1 = (int32_t) lrintf(vfpacc1);
int32_t vout0 = (int32_t) vrndacc0 + voutput_zero_point;
int32_t vout1 = (int32_t) vrndacc1 + voutput_zero_point;
@@ -522,11 +522,11 @@
const int32_t vk24 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[48] - vkernel_zero_point;
vacc += vi24 * vk24;
- const float vfpacc = (float) vacc * vscale;
- long vrndacc = lrintf(vfpacc);
- vrndacc = XNN_UNPREDICTABLE(vrndacc < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc;
- vrndacc = XNN_UNPREDICTABLE(vrndacc > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc;
- int32_t vout = (int32_t) vrndacc + voutput_zero_point;
+ float vfpacc = (float) vacc * vscale;
+ vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
+ vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
+ const int32_t vrndacc = (int32_t) lrintf(vfpacc);
+ int32_t vout = vrndacc + voutput_zero_point;
*output++ = (uint8_t) vout;
}
diff --git a/src/qu8-dwconv/gen/up2x9-minmax-fp32-scalar-lrint.c b/src/qu8-dwconv/gen/up2x9-minmax-fp32-scalar-fmagic.c
similarity index 84%
copy from src/qu8-dwconv/gen/up2x9-minmax-fp32-scalar-lrint.c
copy to src/qu8-dwconv/gen/up2x9-minmax-fp32-scalar-fmagic.c
index e58bcbc..87c0c3e 100644
--- a/src/qu8-dwconv/gen/up2x9-minmax-fp32-scalar-lrint.c
+++ b/src/qu8-dwconv/gen/up2x9-minmax-fp32-scalar-fmagic.c
@@ -8,13 +8,14 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-#include <math.h>
+
+#include <fp16.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
-void xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint(
+void xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic(
size_t channels,
size_t output_width,
const uint8_t** input,
@@ -29,11 +30,12 @@
assert(channels != 0);
assert(output_width != 0);
- const float vscale = params->fp32_scalar_lrint.scale;
- const long voutput_min_less_zero_point = params->fp32_scalar_lrint.output_min_less_zero_point;
- const long voutput_max_less_zero_point = params->fp32_scalar_lrint.output_max_less_zero_point;
- const int32_t voutput_zero_point = params->fp32_scalar_lrint.output_zero_point;
- const int32_t vkernel_zero_point = params->fp32_scalar_lrint.kernel_zero_point;
+ const float vscale = params->fp32_scalar_fmagic.scale;
+ const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
+ const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
+ const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
+ const int32_t vkernel_zero_point = params->fp32_scalar_fmagic.kernel_zero_point;
do {
const uint8_t* i0 = input[0];
assert(i0 != NULL);
@@ -187,17 +189,17 @@
vfpacc0 *= vscale;
vfpacc1 *= vscale;
- long vrndacc0 = lrintf(vfpacc0);
- long vrndacc1 = lrintf(vfpacc1);
+ vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
+ vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
- vrndacc0 = XNN_UNPREDICTABLE(vrndacc0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0;
- vrndacc1 = XNN_UNPREDICTABLE(vrndacc1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1;
+ vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
+ vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
- vrndacc0 = XNN_UNPREDICTABLE(vrndacc0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0;
- vrndacc1 = XNN_UNPREDICTABLE(vrndacc1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1;
+ vfpacc0 += vmagic_bias;
+ vfpacc1 += vmagic_bias;
- int32_t vout0 = (int32_t) vrndacc0 + voutput_zero_point;
- int32_t vout1 = (int32_t) vrndacc1 + voutput_zero_point;
+ int32_t vout0 = (int32_t) fp32_to_bits(vfpacc0) - vmagic_bias_less_output_zero_point;
+ int32_t vout1 = (int32_t) fp32_to_bits(vfpacc1) - vmagic_bias_less_output_zero_point;
output[0] = (uint8_t) vout0;
output[1] = (uint8_t) vout1;
@@ -234,11 +236,11 @@
const int32_t vk8 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[16] - vkernel_zero_point;
vacc += vi8 * vk8;
- const float vfpacc = (float) vacc * vscale;
- long vrndacc = lrintf(vfpacc);
- vrndacc = XNN_UNPREDICTABLE(vrndacc < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc;
- vrndacc = XNN_UNPREDICTABLE(vrndacc > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc;
- int32_t vout = (int32_t) vrndacc + voutput_zero_point;
+ float vfpacc = (float) vacc * vscale;
+ vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
+ vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
+ vfpacc += vmagic_bias;
+ int32_t vout = (int32_t) fp32_to_bits(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (uint8_t) vout;
}
diff --git a/src/qu8-dwconv/gen/up2x9-minmax-fp32-scalar-lrint.c b/src/qu8-dwconv/gen/up2x9-minmax-fp32-scalar-lrintf.c
similarity index 86%
rename from src/qu8-dwconv/gen/up2x9-minmax-fp32-scalar-lrint.c
rename to src/qu8-dwconv/gen/up2x9-minmax-fp32-scalar-lrintf.c
index e58bcbc..8c18e5f 100644
--- a/src/qu8-dwconv/gen/up2x9-minmax-fp32-scalar-lrint.c
+++ b/src/qu8-dwconv/gen/up2x9-minmax-fp32-scalar-lrintf.c
@@ -14,7 +14,7 @@
#include <xnnpack/math.h>
-void xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint(
+void xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrintf(
size_t channels,
size_t output_width,
const uint8_t** input,
@@ -29,11 +29,11 @@
assert(channels != 0);
assert(output_width != 0);
- const float vscale = params->fp32_scalar_lrint.scale;
- const long voutput_min_less_zero_point = params->fp32_scalar_lrint.output_min_less_zero_point;
- const long voutput_max_less_zero_point = params->fp32_scalar_lrint.output_max_less_zero_point;
- const int32_t voutput_zero_point = params->fp32_scalar_lrint.output_zero_point;
- const int32_t vkernel_zero_point = params->fp32_scalar_lrint.kernel_zero_point;
+ const float vscale = params->fp32_scalar_lrintf.scale;
+ const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
+ const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
+ const int32_t vkernel_zero_point = params->fp32_scalar_lrintf.kernel_zero_point;
do {
const uint8_t* i0 = input[0];
assert(i0 != NULL);
@@ -187,14 +187,14 @@
vfpacc0 *= vscale;
vfpacc1 *= vscale;
- long vrndacc0 = lrintf(vfpacc0);
- long vrndacc1 = lrintf(vfpacc1);
+ vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
+ vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
- vrndacc0 = XNN_UNPREDICTABLE(vrndacc0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0;
- vrndacc1 = XNN_UNPREDICTABLE(vrndacc1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1;
+ vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
+ vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
- vrndacc0 = XNN_UNPREDICTABLE(vrndacc0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0;
- vrndacc1 = XNN_UNPREDICTABLE(vrndacc1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1;
+ const int32_t vrndacc0 = (int32_t) lrintf(vfpacc0);
+ const int32_t vrndacc1 = (int32_t) lrintf(vfpacc1);
int32_t vout0 = (int32_t) vrndacc0 + voutput_zero_point;
int32_t vout1 = (int32_t) vrndacc1 + voutput_zero_point;
@@ -234,11 +234,11 @@
const int32_t vk8 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[16] - vkernel_zero_point;
vacc += vi8 * vk8;
- const float vfpacc = (float) vacc * vscale;
- long vrndacc = lrintf(vfpacc);
- vrndacc = XNN_UNPREDICTABLE(vrndacc < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc;
- vrndacc = XNN_UNPREDICTABLE(vrndacc > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc;
- int32_t vout = (int32_t) vrndacc + voutput_zero_point;
+ float vfpacc = (float) vacc * vscale;
+ vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
+ vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
+ const int32_t vrndacc = (int32_t) lrintf(vfpacc);
+ int32_t vout = vrndacc + voutput_zero_point;
*output++ = (uint8_t) vout;
}
diff --git a/src/qu8-dwconv/gen/up4x25-minmax-fp32-scalar-lrint.c b/src/qu8-dwconv/gen/up4x25-minmax-fp32-scalar-fmagic.c
similarity index 93%
rename from src/qu8-dwconv/gen/up4x25-minmax-fp32-scalar-lrint.c
rename to src/qu8-dwconv/gen/up4x25-minmax-fp32-scalar-fmagic.c
index eee6935..cf90e93 100644
--- a/src/qu8-dwconv/gen/up4x25-minmax-fp32-scalar-lrint.c
+++ b/src/qu8-dwconv/gen/up4x25-minmax-fp32-scalar-fmagic.c
@@ -8,13 +8,14 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-#include <math.h>
+
+#include <fp16.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
-void xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrint(
+void xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_fmagic(
size_t channels,
size_t output_width,
const uint8_t** input,
@@ -29,11 +30,12 @@
assert(channels != 0);
assert(output_width != 0);
- const float vscale = params->fp32_scalar_lrint.scale;
- const long voutput_min_less_zero_point = params->fp32_scalar_lrint.output_min_less_zero_point;
- const long voutput_max_less_zero_point = params->fp32_scalar_lrint.output_max_less_zero_point;
- const int32_t voutput_zero_point = params->fp32_scalar_lrint.output_zero_point;
- const int32_t vkernel_zero_point = params->fp32_scalar_lrint.kernel_zero_point;
+ const float vscale = params->fp32_scalar_fmagic.scale;
+ const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
+ const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
+ const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
+ const int32_t vkernel_zero_point = params->fp32_scalar_fmagic.kernel_zero_point;
do {
const uint8_t* i0 = input[0];
assert(i0 != NULL);
@@ -583,25 +585,25 @@
vfpacc2 *= vscale;
vfpacc3 *= vscale;
- long vrndacc0 = lrintf(vfpacc0);
- long vrndacc1 = lrintf(vfpacc1);
- long vrndacc2 = lrintf(vfpacc2);
- long vrndacc3 = lrintf(vfpacc3);
+ vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
+ vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
+ vfpacc2 = math_max_f32(vfpacc2, voutput_min_less_zero_point);
+ vfpacc3 = math_max_f32(vfpacc3, voutput_min_less_zero_point);
- vrndacc0 = XNN_UNPREDICTABLE(vrndacc0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0;
- vrndacc1 = XNN_UNPREDICTABLE(vrndacc1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1;
- vrndacc2 = XNN_UNPREDICTABLE(vrndacc2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2;
- vrndacc3 = XNN_UNPREDICTABLE(vrndacc3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc3;
+ vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
+ vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
+ vfpacc2 = math_min_f32(vfpacc2, voutput_max_less_zero_point);
+ vfpacc3 = math_min_f32(vfpacc3, voutput_max_less_zero_point);
- vrndacc0 = XNN_UNPREDICTABLE(vrndacc0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0;
- vrndacc1 = XNN_UNPREDICTABLE(vrndacc1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1;
- vrndacc2 = XNN_UNPREDICTABLE(vrndacc2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2;
- vrndacc3 = XNN_UNPREDICTABLE(vrndacc3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc3;
+ vfpacc0 += vmagic_bias;
+ vfpacc1 += vmagic_bias;
+ vfpacc2 += vmagic_bias;
+ vfpacc3 += vmagic_bias;
- int32_t vout0 = (int32_t) vrndacc0 + voutput_zero_point;
- int32_t vout1 = (int32_t) vrndacc1 + voutput_zero_point;
- int32_t vout2 = (int32_t) vrndacc2 + voutput_zero_point;
- int32_t vout3 = (int32_t) vrndacc3 + voutput_zero_point;
+ int32_t vout0 = (int32_t) fp32_to_bits(vfpacc0) - vmagic_bias_less_output_zero_point;
+ int32_t vout1 = (int32_t) fp32_to_bits(vfpacc1) - vmagic_bias_less_output_zero_point;
+ int32_t vout2 = (int32_t) fp32_to_bits(vfpacc2) - vmagic_bias_less_output_zero_point;
+ int32_t vout3 = (int32_t) fp32_to_bits(vfpacc3) - vmagic_bias_less_output_zero_point;
output[0] = (uint8_t) vout0;
output[1] = (uint8_t) vout1;
@@ -692,11 +694,11 @@
vacc += vi24 * vk24;
k += 1;
- const float vfpacc = (float) vacc * vscale;
- long vrndacc = lrintf(vfpacc);
- vrndacc = XNN_UNPREDICTABLE(vrndacc < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc;
- vrndacc = XNN_UNPREDICTABLE(vrndacc > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc;
- int32_t vout = (int32_t) vrndacc + voutput_zero_point;
+ float vfpacc = (float) vacc * vscale;
+ vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
+ vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
+ vfpacc += vmagic_bias;
+ int32_t vout = (int32_t) fp32_to_bits(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (uint8_t) vout;
} while (--c != 0);
diff --git a/src/qu8-dwconv/gen/up4x25-minmax-fp32-scalar-lrint.c b/src/qu8-dwconv/gen/up4x25-minmax-fp32-scalar-lrintf.c
similarity index 93%
copy from src/qu8-dwconv/gen/up4x25-minmax-fp32-scalar-lrint.c
copy to src/qu8-dwconv/gen/up4x25-minmax-fp32-scalar-lrintf.c
index eee6935..221b69c 100644
--- a/src/qu8-dwconv/gen/up4x25-minmax-fp32-scalar-lrint.c
+++ b/src/qu8-dwconv/gen/up4x25-minmax-fp32-scalar-lrintf.c
@@ -14,7 +14,7 @@
#include <xnnpack/math.h>
-void xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrint(
+void xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrintf(
size_t channels,
size_t output_width,
const uint8_t** input,
@@ -29,11 +29,11 @@
assert(channels != 0);
assert(output_width != 0);
- const float vscale = params->fp32_scalar_lrint.scale;
- const long voutput_min_less_zero_point = params->fp32_scalar_lrint.output_min_less_zero_point;
- const long voutput_max_less_zero_point = params->fp32_scalar_lrint.output_max_less_zero_point;
- const int32_t voutput_zero_point = params->fp32_scalar_lrint.output_zero_point;
- const int32_t vkernel_zero_point = params->fp32_scalar_lrint.kernel_zero_point;
+ const float vscale = params->fp32_scalar_lrintf.scale;
+ const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
+ const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
+ const int32_t vkernel_zero_point = params->fp32_scalar_lrintf.kernel_zero_point;
do {
const uint8_t* i0 = input[0];
assert(i0 != NULL);
@@ -583,20 +583,20 @@
vfpacc2 *= vscale;
vfpacc3 *= vscale;
- long vrndacc0 = lrintf(vfpacc0);
- long vrndacc1 = lrintf(vfpacc1);
- long vrndacc2 = lrintf(vfpacc2);
- long vrndacc3 = lrintf(vfpacc3);
+ vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
+ vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
+ vfpacc2 = math_max_f32(vfpacc2, voutput_min_less_zero_point);
+ vfpacc3 = math_max_f32(vfpacc3, voutput_min_less_zero_point);
- vrndacc0 = XNN_UNPREDICTABLE(vrndacc0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0;
- vrndacc1 = XNN_UNPREDICTABLE(vrndacc1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1;
- vrndacc2 = XNN_UNPREDICTABLE(vrndacc2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2;
- vrndacc3 = XNN_UNPREDICTABLE(vrndacc3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc3;
+ vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
+ vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
+ vfpacc2 = math_min_f32(vfpacc2, voutput_max_less_zero_point);
+ vfpacc3 = math_min_f32(vfpacc3, voutput_max_less_zero_point);
- vrndacc0 = XNN_UNPREDICTABLE(vrndacc0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0;
- vrndacc1 = XNN_UNPREDICTABLE(vrndacc1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1;
- vrndacc2 = XNN_UNPREDICTABLE(vrndacc2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2;
- vrndacc3 = XNN_UNPREDICTABLE(vrndacc3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc3;
+ const int32_t vrndacc0 = (int32_t) lrintf(vfpacc0);
+ const int32_t vrndacc1 = (int32_t) lrintf(vfpacc1);
+ const int32_t vrndacc2 = (int32_t) lrintf(vfpacc2);
+ const int32_t vrndacc3 = (int32_t) lrintf(vfpacc3);
int32_t vout0 = (int32_t) vrndacc0 + voutput_zero_point;
int32_t vout1 = (int32_t) vrndacc1 + voutput_zero_point;
@@ -692,11 +692,11 @@
vacc += vi24 * vk24;
k += 1;
- const float vfpacc = (float) vacc * vscale;
- long vrndacc = lrintf(vfpacc);
- vrndacc = XNN_UNPREDICTABLE(vrndacc < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc;
- vrndacc = XNN_UNPREDICTABLE(vrndacc > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc;
- int32_t vout = (int32_t) vrndacc + voutput_zero_point;
+ float vfpacc = (float) vacc * vscale;
+ vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
+ vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
+ const int32_t vrndacc = (int32_t) lrintf(vfpacc);
+ int32_t vout = vrndacc + voutput_zero_point;
*output++ = (uint8_t) vout;
} while (--c != 0);
diff --git a/src/qu8-dwconv/gen/up4x9-minmax-fp32-scalar-lrint.c b/src/qu8-dwconv/gen/up4x9-minmax-fp32-scalar-fmagic.c
similarity index 84%
copy from src/qu8-dwconv/gen/up4x9-minmax-fp32-scalar-lrint.c
copy to src/qu8-dwconv/gen/up4x9-minmax-fp32-scalar-fmagic.c
index 5702d00..b1bb3d2 100644
--- a/src/qu8-dwconv/gen/up4x9-minmax-fp32-scalar-lrint.c
+++ b/src/qu8-dwconv/gen/up4x9-minmax-fp32-scalar-fmagic.c
@@ -8,13 +8,14 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-#include <math.h>
+
+#include <fp16.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
-void xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint(
+void xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_fmagic(
size_t channels,
size_t output_width,
const uint8_t** input,
@@ -29,11 +30,12 @@
assert(channels != 0);
assert(output_width != 0);
- const float vscale = params->fp32_scalar_lrint.scale;
- const long voutput_min_less_zero_point = params->fp32_scalar_lrint.output_min_less_zero_point;
- const long voutput_max_less_zero_point = params->fp32_scalar_lrint.output_max_less_zero_point;
- const int32_t voutput_zero_point = params->fp32_scalar_lrint.output_zero_point;
- const int32_t vkernel_zero_point = params->fp32_scalar_lrint.kernel_zero_point;
+ const float vscale = params->fp32_scalar_fmagic.scale;
+ const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
+ const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
+ const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
+ const int32_t vkernel_zero_point = params->fp32_scalar_fmagic.kernel_zero_point;
do {
const uint8_t* i0 = input[0];
assert(i0 != NULL);
@@ -247,25 +249,25 @@
vfpacc2 *= vscale;
vfpacc3 *= vscale;
- long vrndacc0 = lrintf(vfpacc0);
- long vrndacc1 = lrintf(vfpacc1);
- long vrndacc2 = lrintf(vfpacc2);
- long vrndacc3 = lrintf(vfpacc3);
+ vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
+ vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
+ vfpacc2 = math_max_f32(vfpacc2, voutput_min_less_zero_point);
+ vfpacc3 = math_max_f32(vfpacc3, voutput_min_less_zero_point);
- vrndacc0 = XNN_UNPREDICTABLE(vrndacc0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0;
- vrndacc1 = XNN_UNPREDICTABLE(vrndacc1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1;
- vrndacc2 = XNN_UNPREDICTABLE(vrndacc2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2;
- vrndacc3 = XNN_UNPREDICTABLE(vrndacc3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc3;
+ vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
+ vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
+ vfpacc2 = math_min_f32(vfpacc2, voutput_max_less_zero_point);
+ vfpacc3 = math_min_f32(vfpacc3, voutput_max_less_zero_point);
- vrndacc0 = XNN_UNPREDICTABLE(vrndacc0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0;
- vrndacc1 = XNN_UNPREDICTABLE(vrndacc1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1;
- vrndacc2 = XNN_UNPREDICTABLE(vrndacc2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2;
- vrndacc3 = XNN_UNPREDICTABLE(vrndacc3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc3;
+ vfpacc0 += vmagic_bias;
+ vfpacc1 += vmagic_bias;
+ vfpacc2 += vmagic_bias;
+ vfpacc3 += vmagic_bias;
- int32_t vout0 = (int32_t) vrndacc0 + voutput_zero_point;
- int32_t vout1 = (int32_t) vrndacc1 + voutput_zero_point;
- int32_t vout2 = (int32_t) vrndacc2 + voutput_zero_point;
- int32_t vout3 = (int32_t) vrndacc3 + voutput_zero_point;
+ int32_t vout0 = (int32_t) fp32_to_bits(vfpacc0) - vmagic_bias_less_output_zero_point;
+ int32_t vout1 = (int32_t) fp32_to_bits(vfpacc1) - vmagic_bias_less_output_zero_point;
+ int32_t vout2 = (int32_t) fp32_to_bits(vfpacc2) - vmagic_bias_less_output_zero_point;
+ int32_t vout3 = (int32_t) fp32_to_bits(vfpacc3) - vmagic_bias_less_output_zero_point;
output[0] = (uint8_t) vout0;
output[1] = (uint8_t) vout1;
@@ -308,11 +310,11 @@
vacc += vi8 * vk8;
k += 1;
- const float vfpacc = (float) vacc * vscale;
- long vrndacc = lrintf(vfpacc);
- vrndacc = XNN_UNPREDICTABLE(vrndacc < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc;
- vrndacc = XNN_UNPREDICTABLE(vrndacc > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc;
- int32_t vout = (int32_t) vrndacc + voutput_zero_point;
+ float vfpacc = (float) vacc * vscale;
+ vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
+ vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
+ vfpacc += vmagic_bias;
+ int32_t vout = (int32_t) fp32_to_bits(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (uint8_t) vout;
} while (--c != 0);
diff --git a/src/qu8-dwconv/gen/up4x9-minmax-fp32-scalar-lrint.c b/src/qu8-dwconv/gen/up4x9-minmax-fp32-scalar-lrintf.c
similarity index 86%
rename from src/qu8-dwconv/gen/up4x9-minmax-fp32-scalar-lrint.c
rename to src/qu8-dwconv/gen/up4x9-minmax-fp32-scalar-lrintf.c
index 5702d00..4c051e5 100644
--- a/src/qu8-dwconv/gen/up4x9-minmax-fp32-scalar-lrint.c
+++ b/src/qu8-dwconv/gen/up4x9-minmax-fp32-scalar-lrintf.c
@@ -14,7 +14,7 @@
#include <xnnpack/math.h>
-void xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint(
+void xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrintf(
size_t channels,
size_t output_width,
const uint8_t** input,
@@ -29,11 +29,11 @@
assert(channels != 0);
assert(output_width != 0);
- const float vscale = params->fp32_scalar_lrint.scale;
- const long voutput_min_less_zero_point = params->fp32_scalar_lrint.output_min_less_zero_point;
- const long voutput_max_less_zero_point = params->fp32_scalar_lrint.output_max_less_zero_point;
- const int32_t voutput_zero_point = params->fp32_scalar_lrint.output_zero_point;
- const int32_t vkernel_zero_point = params->fp32_scalar_lrint.kernel_zero_point;
+ const float vscale = params->fp32_scalar_lrintf.scale;
+ const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
+ const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
+ const int32_t vkernel_zero_point = params->fp32_scalar_lrintf.kernel_zero_point;
do {
const uint8_t* i0 = input[0];
assert(i0 != NULL);
@@ -247,20 +247,20 @@
vfpacc2 *= vscale;
vfpacc3 *= vscale;
- long vrndacc0 = lrintf(vfpacc0);
- long vrndacc1 = lrintf(vfpacc1);
- long vrndacc2 = lrintf(vfpacc2);
- long vrndacc3 = lrintf(vfpacc3);
+ vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
+ vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
+ vfpacc2 = math_max_f32(vfpacc2, voutput_min_less_zero_point);
+ vfpacc3 = math_max_f32(vfpacc3, voutput_min_less_zero_point);
- vrndacc0 = XNN_UNPREDICTABLE(vrndacc0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0;
- vrndacc1 = XNN_UNPREDICTABLE(vrndacc1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1;
- vrndacc2 = XNN_UNPREDICTABLE(vrndacc2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2;
- vrndacc3 = XNN_UNPREDICTABLE(vrndacc3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc3;
+ vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
+ vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
+ vfpacc2 = math_min_f32(vfpacc2, voutput_max_less_zero_point);
+ vfpacc3 = math_min_f32(vfpacc3, voutput_max_less_zero_point);
- vrndacc0 = XNN_UNPREDICTABLE(vrndacc0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0;
- vrndacc1 = XNN_UNPREDICTABLE(vrndacc1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1;
- vrndacc2 = XNN_UNPREDICTABLE(vrndacc2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2;
- vrndacc3 = XNN_UNPREDICTABLE(vrndacc3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc3;
+ const int32_t vrndacc0 = (int32_t) lrintf(vfpacc0);
+ const int32_t vrndacc1 = (int32_t) lrintf(vfpacc1);
+ const int32_t vrndacc2 = (int32_t) lrintf(vfpacc2);
+ const int32_t vrndacc3 = (int32_t) lrintf(vfpacc3);
int32_t vout0 = (int32_t) vrndacc0 + voutput_zero_point;
int32_t vout1 = (int32_t) vrndacc1 + voutput_zero_point;
@@ -308,11 +308,11 @@
vacc += vi8 * vk8;
k += 1;
- const float vfpacc = (float) vacc * vscale;
- long vrndacc = lrintf(vfpacc);
- vrndacc = XNN_UNPREDICTABLE(vrndacc < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc;
- vrndacc = XNN_UNPREDICTABLE(vrndacc > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc;
- int32_t vout = (int32_t) vrndacc + voutput_zero_point;
+ float vfpacc = (float) vacc * vscale;
+ vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
+ vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
+ const int32_t vrndacc = (int32_t) lrintf(vfpacc);
+ int32_t vout = vrndacc + voutput_zero_point;
*output++ = (uint8_t) vout;
} while (--c != 0);
diff --git a/src/qu8-gemm/gen/1x2-minmax-fp32-scalar-magic.c b/src/qu8-gemm/gen/1x2-minmax-fp32-scalar-fmagic.c
similarity index 86%
rename from src/qu8-gemm/gen/1x2-minmax-fp32-scalar-magic.c
rename to src/qu8-gemm/gen/1x2-minmax-fp32-scalar-fmagic.c
index c9b8397..69bba97 100644
--- a/src/qu8-gemm/gen/1x2-minmax-fp32-scalar-magic.c
+++ b/src/qu8-gemm/gen/1x2-minmax-fp32-scalar-fmagic.c
@@ -15,7 +15,7 @@
#include <xnnpack/gemm.h>
-void xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_magic(
+void xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic(
size_t mr,
size_t nc,
size_t kc,
@@ -35,7 +35,7 @@
const uint8_t* a0 = a;
uint8_t* c0 = c;
- const int32_t vb_zero_point = params->fp32_scalar_magic.kernel_zero_point;
+ const int32_t vb_zero_point = params->fp32_scalar_fmagic.kernel_zero_point;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
@@ -58,23 +58,23 @@
float vfpacc0x0 = (float) vacc0x0;
float vfpacc0x1 = (float) vacc0x1;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_fmagic.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
+ const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
+ const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
diff --git a/src/qu8-gemm/gen/1x2-minmax-fp32-scalar-lrint.c b/src/qu8-gemm/gen/1x2-minmax-fp32-scalar-lrint.c
deleted file mode 100644
index 81e69f3..0000000
--- a/src/qu8-gemm/gen/1x2-minmax-fp32-scalar-lrint.c
+++ /dev/null
@@ -1,96 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-gemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-#include <math.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint(
- size_t mr,
- size_t nc,
- size_t kc,
- const uint8_t* restrict a,
- size_t a_stride,
- const void* restrict w,
- uint8_t* restrict c,
- size_t cm_stride,
- size_t cn_stride,
- const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 1);
- assert(nc != 0);
- assert(kc != 0);
-
- const uint8_t* a0 = a;
- uint8_t* c0 = c;
-
- const int32_t vb_zero_point = params->fp32_scalar_lrint.kernel_zero_point;
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- w = (const void*) ((const int32_t*) w + 2);
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) (uint32_t) *a0++;
-
- const int32_t vb0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vb_zero_point;
- const int32_t vb1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vb_zero_point;
- w = (const void*) ((const uint8_t*) w + 2);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
-
- k -= sizeof(uint8_t);
- } while (k != 0);
-
- float vfpacc0x0 = (float) vacc0x0;
- float vfpacc0x1 = (float) vacc0x1;
-
- const float vscale = params->fp32_scalar_lrint.scale;
- vfpacc0x0 *= vscale;
- vfpacc0x1 *= vscale;
-
- long vrndacc0x0 = lrintf(vfpacc0x0);
- long vrndacc0x1 = lrintf(vfpacc0x1);
-
- const long voutput_min_less_zero_point = params->fp32_scalar_lrint.output_min_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x1;
-
- const long voutput_max_less_zero_point = params->fp32_scalar_lrint.output_max_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x1;
-
- const int32_t voutput_zero_point = params->fp32_scalar_lrint.output_zero_point;
- int32_t vout0x0 = (int32_t) vrndacc0x0 + voutput_zero_point;
- int32_t vout0x1 = (int32_t) vrndacc0x1 + voutput_zero_point;
-
- if XNN_LIKELY(nc >= 2) {
- c0[0] = (uint8_t) vout0x0;
- c0[1] = (uint8_t) vout0x1;
-
- a0 = (const uint8_t*) ((uintptr_t) a0 - kc);
-
- c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
-
- nc -= 2;
- } else {
- if (nc & 1) {
- c0[0] = (uint8_t) vout0x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qu8-gemm/gen/1x2-minmax-fp32-scalar-magic.c b/src/qu8-gemm/gen/1x2-minmax-fp32-scalar-lrintf.c
similarity index 75%
copy from src/qu8-gemm/gen/1x2-minmax-fp32-scalar-magic.c
copy to src/qu8-gemm/gen/1x2-minmax-fp32-scalar-lrintf.c
index c9b8397..111dddb 100644
--- a/src/qu8-gemm/gen/1x2-minmax-fp32-scalar-magic.c
+++ b/src/qu8-gemm/gen/1x2-minmax-fp32-scalar-lrintf.c
@@ -8,14 +8,13 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-
-#include <fp16.h>
+#include <math.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
-void xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_magic(
+void xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
@@ -35,7 +34,7 @@
const uint8_t* a0 = a;
uint8_t* c0 = c;
- const int32_t vb_zero_point = params->fp32_scalar_magic.kernel_zero_point;
+ const int32_t vb_zero_point = params->fp32_scalar_lrintf.kernel_zero_point;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
@@ -58,25 +57,24 @@
float vfpacc0x0 = (float) vacc0x0;
float vfpacc0x1 = (float) vacc0x1;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_lrintf.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
- vfpacc0x0 += vmagic_bias;
- vfpacc0x1 += vmagic_bias;
+ const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
+ const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
- int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
- int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
+ const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
+ int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
+ int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
if XNN_LIKELY(nc >= 2) {
c0[0] = (uint8_t) vout0x0;
diff --git a/src/qu8-gemm/gen/1x4-minmax-fp32-scalar-magic.c b/src/qu8-gemm/gen/1x4-minmax-fp32-scalar-fmagic.c
similarity index 90%
rename from src/qu8-gemm/gen/1x4-minmax-fp32-scalar-magic.c
rename to src/qu8-gemm/gen/1x4-minmax-fp32-scalar-fmagic.c
index 4251d0c..51d932d 100644
--- a/src/qu8-gemm/gen/1x4-minmax-fp32-scalar-magic.c
+++ b/src/qu8-gemm/gen/1x4-minmax-fp32-scalar-fmagic.c
@@ -15,7 +15,7 @@
#include <xnnpack/gemm.h>
-void xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_magic(
+void xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic(
size_t mr,
size_t nc,
size_t kc,
@@ -35,7 +35,7 @@
const uint8_t* a0 = a;
uint8_t* c0 = c;
- const int32_t vb_zero_point = params->fp32_scalar_magic.kernel_zero_point;
+ const int32_t vb_zero_point = params->fp32_scalar_fmagic.kernel_zero_point;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
@@ -66,31 +66,31 @@
float vfpacc0x2 = (float) vacc0x2;
float vfpacc0x3 = (float) vacc0x3;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_fmagic.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc0x2 *= vscale;
vfpacc0x3 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
vfpacc0x3 = math_max_f32(vfpacc0x3, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
vfpacc0x3 = math_min_f32(vfpacc0x3, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
+ const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc0x2 += vmagic_bias;
vfpacc0x3 += vmagic_bias;
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
+ const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
int32_t vout0x2 = (int32_t) fp32_to_bits(vfpacc0x2) - vmagic_bias_less_output_zero_point;
diff --git a/src/qu8-gemm/gen/1x4-minmax-fp32-scalar-lrint.c b/src/qu8-gemm/gen/1x4-minmax-fp32-scalar-lrint.c
deleted file mode 100644
index 6279f33..0000000
--- a/src/qu8-gemm/gen/1x4-minmax-fp32-scalar-lrint.c
+++ /dev/null
@@ -1,122 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-gemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-#include <math.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint(
- size_t mr,
- size_t nc,
- size_t kc,
- const uint8_t* restrict a,
- size_t a_stride,
- const void* restrict w,
- uint8_t* restrict c,
- size_t cm_stride,
- size_t cn_stride,
- const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 1);
- assert(nc != 0);
- assert(kc != 0);
-
- const uint8_t* a0 = a;
- uint8_t* c0 = c;
-
- const int32_t vb_zero_point = params->fp32_scalar_lrint.kernel_zero_point;
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- int32_t vacc0x2 = ((const int32_t*) w)[2];
- int32_t vacc0x3 = ((const int32_t*) w)[3];
- w = (const void*) ((const int32_t*) w + 4);
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) (uint32_t) *a0++;
-
- const int32_t vb0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vb_zero_point;
- const int32_t vb1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vb_zero_point;
- const int32_t vb2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vb_zero_point;
- const int32_t vb3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vb_zero_point;
- w = (const void*) ((const uint8_t*) w + 4);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
- vacc0x2 += va0 * vb2;
- vacc0x3 += va0 * vb3;
-
- k -= sizeof(uint8_t);
- } while (k != 0);
-
- float vfpacc0x0 = (float) vacc0x0;
- float vfpacc0x1 = (float) vacc0x1;
- float vfpacc0x2 = (float) vacc0x2;
- float vfpacc0x3 = (float) vacc0x3;
-
- const float vscale = params->fp32_scalar_lrint.scale;
- vfpacc0x0 *= vscale;
- vfpacc0x1 *= vscale;
- vfpacc0x2 *= vscale;
- vfpacc0x3 *= vscale;
-
- long vrndacc0x0 = lrintf(vfpacc0x0);
- long vrndacc0x1 = lrintf(vfpacc0x1);
- long vrndacc0x2 = lrintf(vfpacc0x2);
- long vrndacc0x3 = lrintf(vfpacc0x3);
-
- const long voutput_min_less_zero_point = params->fp32_scalar_lrint.output_min_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x1;
- vrndacc0x2 = XNN_UNPREDICTABLE(vrndacc0x2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x2;
- vrndacc0x3 = XNN_UNPREDICTABLE(vrndacc0x3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x3;
-
- const long voutput_max_less_zero_point = params->fp32_scalar_lrint.output_max_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x1;
- vrndacc0x2 = XNN_UNPREDICTABLE(vrndacc0x2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x2;
- vrndacc0x3 = XNN_UNPREDICTABLE(vrndacc0x3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x3;
-
- const int32_t voutput_zero_point = params->fp32_scalar_lrint.output_zero_point;
- int32_t vout0x0 = (int32_t) vrndacc0x0 + voutput_zero_point;
- int32_t vout0x1 = (int32_t) vrndacc0x1 + voutput_zero_point;
- int32_t vout0x2 = (int32_t) vrndacc0x2 + voutput_zero_point;
- int32_t vout0x3 = (int32_t) vrndacc0x3 + voutput_zero_point;
-
- if XNN_LIKELY(nc >= 4) {
- c0[0] = (uint8_t) vout0x0;
- c0[1] = (uint8_t) vout0x1;
- c0[2] = (uint8_t) vout0x2;
- c0[3] = (uint8_t) vout0x3;
-
- a0 = (const uint8_t*) ((uintptr_t) a0 - kc);
-
- c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
-
- nc -= 4;
- } else {
- if (nc & 2) {
- c0[0] = (uint8_t) vout0x0;
- c0[1] = (uint8_t) vout0x1;
- vout0x0 = vout0x2;
- c0 += 2;
- }
- if (nc & 1) {
- c0[0] = (uint8_t) vout0x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qu8-gemm/gen/1x4-minmax-fp32-scalar-magic.c b/src/qu8-gemm/gen/1x4-minmax-fp32-scalar-lrintf.c
similarity index 76%
copy from src/qu8-gemm/gen/1x4-minmax-fp32-scalar-magic.c
copy to src/qu8-gemm/gen/1x4-minmax-fp32-scalar-lrintf.c
index 4251d0c..7fb202f 100644
--- a/src/qu8-gemm/gen/1x4-minmax-fp32-scalar-magic.c
+++ b/src/qu8-gemm/gen/1x4-minmax-fp32-scalar-lrintf.c
@@ -8,14 +8,13 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-
-#include <fp16.h>
+#include <math.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
-void xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_magic(
+void xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
@@ -35,7 +34,7 @@
const uint8_t* a0 = a;
uint8_t* c0 = c;
- const int32_t vb_zero_point = params->fp32_scalar_magic.kernel_zero_point;
+ const int32_t vb_zero_point = params->fp32_scalar_lrintf.kernel_zero_point;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
@@ -66,35 +65,34 @@
float vfpacc0x2 = (float) vacc0x2;
float vfpacc0x3 = (float) vacc0x3;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_lrintf.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc0x2 *= vscale;
vfpacc0x3 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
vfpacc0x3 = math_max_f32(vfpacc0x3, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
vfpacc0x3 = math_min_f32(vfpacc0x3, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
- vfpacc0x0 += vmagic_bias;
- vfpacc0x1 += vmagic_bias;
- vfpacc0x2 += vmagic_bias;
- vfpacc0x3 += vmagic_bias;
+ const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
+ const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
+ const int32_t vrndacc0x2 = (int32_t) lrintf(vfpacc0x2);
+ const int32_t vrndacc0x3 = (int32_t) lrintf(vfpacc0x3);
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
- int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
- int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
- int32_t vout0x2 = (int32_t) fp32_to_bits(vfpacc0x2) - vmagic_bias_less_output_zero_point;
- int32_t vout0x3 = (int32_t) fp32_to_bits(vfpacc0x3) - vmagic_bias_less_output_zero_point;
+ const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
+ int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
+ int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
+ int32_t vout0x2 = vrndacc0x2 + voutput_zero_point;
+ int32_t vout0x3 = vrndacc0x3 + voutput_zero_point;
if XNN_LIKELY(nc >= 4) {
c0[0] = (uint8_t) vout0x0;
diff --git a/src/qu8-gemm/gen/2x2-minmax-fp32-scalar-magic.c b/src/qu8-gemm/gen/2x2-minmax-fp32-scalar-fmagic.c
similarity index 90%
rename from src/qu8-gemm/gen/2x2-minmax-fp32-scalar-magic.c
rename to src/qu8-gemm/gen/2x2-minmax-fp32-scalar-fmagic.c
index 050b4d2..448c1b1 100644
--- a/src/qu8-gemm/gen/2x2-minmax-fp32-scalar-magic.c
+++ b/src/qu8-gemm/gen/2x2-minmax-fp32-scalar-fmagic.c
@@ -15,7 +15,7 @@
#include <xnnpack/gemm.h>
-void xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_magic(
+void xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic(
size_t mr,
size_t nc,
size_t kc,
@@ -41,7 +41,7 @@
c1 = c0;
}
- const int32_t vb_zero_point = params->fp32_scalar_magic.kernel_zero_point;
+ const int32_t vb_zero_point = params->fp32_scalar_fmagic.kernel_zero_point;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
@@ -71,31 +71,31 @@
float vfpacc1x0 = (float) vacc1x0;
float vfpacc1x1 = (float) vacc1x1;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_fmagic.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc1x0 *= vscale;
vfpacc1x1 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point);
vfpacc1x1 = math_max_f32(vfpacc1x1, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point);
vfpacc1x1 = math_min_f32(vfpacc1x1, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
+ const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc1x0 += vmagic_bias;
vfpacc1x1 += vmagic_bias;
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
+ const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
int32_t vout1x0 = (int32_t) fp32_to_bits(vfpacc1x0) - vmagic_bias_less_output_zero_point;
diff --git a/src/qu8-gemm/gen/2x2-minmax-fp32-scalar-lrint.c b/src/qu8-gemm/gen/2x2-minmax-fp32-scalar-lrint.c
deleted file mode 100644
index ff69821..0000000
--- a/src/qu8-gemm/gen/2x2-minmax-fp32-scalar-lrint.c
+++ /dev/null
@@ -1,124 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-gemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-#include <math.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint(
- size_t mr,
- size_t nc,
- size_t kc,
- const uint8_t* restrict a,
- size_t a_stride,
- const void* restrict w,
- uint8_t* restrict c,
- size_t cm_stride,
- size_t cn_stride,
- const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 2);
- assert(nc != 0);
- assert(kc != 0);
-
- const uint8_t* a0 = a;
- uint8_t* c0 = c;
- const uint8_t* a1 = (const uint8_t*) ((uintptr_t) a0 + a_stride);
- uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
- if XNN_UNPREDICTABLE(mr != 2) {
- a1 = a0;
- c1 = c0;
- }
-
- const int32_t vb_zero_point = params->fp32_scalar_lrint.kernel_zero_point;
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- int32_t vacc1x0 = vacc0x0;
- int32_t vacc1x1 = vacc0x1;
- w = (const void*) ((const int32_t*) w + 2);
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) (uint32_t) *a0++;
- const int32_t va1 = (int32_t) (uint32_t) *a1++;
-
- const int32_t vb0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vb_zero_point;
- const int32_t vb1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vb_zero_point;
- w = (const void*) ((const uint8_t*) w + 2);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
- vacc1x0 += va1 * vb0;
- vacc1x1 += va1 * vb1;
-
- k -= sizeof(uint8_t);
- } while (k != 0);
-
- float vfpacc0x0 = (float) vacc0x0;
- float vfpacc0x1 = (float) vacc0x1;
- float vfpacc1x0 = (float) vacc1x0;
- float vfpacc1x1 = (float) vacc1x1;
-
- const float vscale = params->fp32_scalar_lrint.scale;
- vfpacc0x0 *= vscale;
- vfpacc0x1 *= vscale;
- vfpacc1x0 *= vscale;
- vfpacc1x1 *= vscale;
-
- long vrndacc0x0 = lrintf(vfpacc0x0);
- long vrndacc0x1 = lrintf(vfpacc0x1);
- long vrndacc1x0 = lrintf(vfpacc1x0);
- long vrndacc1x1 = lrintf(vfpacc1x1);
-
- const long voutput_min_less_zero_point = params->fp32_scalar_lrint.output_min_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x1;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x1;
-
- const long voutput_max_less_zero_point = params->fp32_scalar_lrint.output_max_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x1;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x1;
-
- const int32_t voutput_zero_point = params->fp32_scalar_lrint.output_zero_point;
- int32_t vout0x0 = (int32_t) vrndacc0x0 + voutput_zero_point;
- int32_t vout0x1 = (int32_t) vrndacc0x1 + voutput_zero_point;
- int32_t vout1x0 = (int32_t) vrndacc1x0 + voutput_zero_point;
- int32_t vout1x1 = (int32_t) vrndacc1x1 + voutput_zero_point;
-
- if XNN_LIKELY(nc >= 2) {
- c0[0] = (uint8_t) vout0x0;
- c0[1] = (uint8_t) vout0x1;
- c1[0] = (uint8_t) vout1x0;
- c1[1] = (uint8_t) vout1x1;
-
- a0 = (const uint8_t*) ((uintptr_t) a0 - kc);
- a1 = (const uint8_t*) ((uintptr_t) a1 - kc);
-
- c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
- c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
-
- nc -= 2;
- } else {
- if (nc & 1) {
- c0[0] = (uint8_t) vout0x0;
- c1[0] = (uint8_t) vout1x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qu8-gemm/gen/2x2-minmax-fp32-scalar-magic.c b/src/qu8-gemm/gen/2x2-minmax-fp32-scalar-lrintf.c
similarity index 76%
copy from src/qu8-gemm/gen/2x2-minmax-fp32-scalar-magic.c
copy to src/qu8-gemm/gen/2x2-minmax-fp32-scalar-lrintf.c
index 050b4d2..73fdea7 100644
--- a/src/qu8-gemm/gen/2x2-minmax-fp32-scalar-magic.c
+++ b/src/qu8-gemm/gen/2x2-minmax-fp32-scalar-lrintf.c
@@ -8,14 +8,13 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-
-#include <fp16.h>
+#include <math.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
-void xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_magic(
+void xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
@@ -41,7 +40,7 @@
c1 = c0;
}
- const int32_t vb_zero_point = params->fp32_scalar_magic.kernel_zero_point;
+ const int32_t vb_zero_point = params->fp32_scalar_lrintf.kernel_zero_point;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
@@ -71,35 +70,34 @@
float vfpacc1x0 = (float) vacc1x0;
float vfpacc1x1 = (float) vacc1x1;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_lrintf.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc1x0 *= vscale;
vfpacc1x1 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point);
vfpacc1x1 = math_max_f32(vfpacc1x1, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point);
vfpacc1x1 = math_min_f32(vfpacc1x1, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
- vfpacc0x0 += vmagic_bias;
- vfpacc0x1 += vmagic_bias;
- vfpacc1x0 += vmagic_bias;
- vfpacc1x1 += vmagic_bias;
+ const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
+ const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
+ const int32_t vrndacc1x0 = (int32_t) lrintf(vfpacc1x0);
+ const int32_t vrndacc1x1 = (int32_t) lrintf(vfpacc1x1);
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
- int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
- int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
- int32_t vout1x0 = (int32_t) fp32_to_bits(vfpacc1x0) - vmagic_bias_less_output_zero_point;
- int32_t vout1x1 = (int32_t) fp32_to_bits(vfpacc1x1) - vmagic_bias_less_output_zero_point;
+ const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
+ int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
+ int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
+ int32_t vout1x0 = vrndacc1x0 + voutput_zero_point;
+ int32_t vout1x1 = vrndacc1x1 + voutput_zero_point;
if XNN_LIKELY(nc >= 2) {
c0[0] = (uint8_t) vout0x0;
diff --git a/src/qu8-gemm/gen/2x4-minmax-fp32-scalar-magic.c b/src/qu8-gemm/gen/2x4-minmax-fp32-scalar-fmagic.c
similarity index 93%
rename from src/qu8-gemm/gen/2x4-minmax-fp32-scalar-magic.c
rename to src/qu8-gemm/gen/2x4-minmax-fp32-scalar-fmagic.c
index e26ea58..dfc6844 100644
--- a/src/qu8-gemm/gen/2x4-minmax-fp32-scalar-magic.c
+++ b/src/qu8-gemm/gen/2x4-minmax-fp32-scalar-fmagic.c
@@ -15,7 +15,7 @@
#include <xnnpack/gemm.h>
-void xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_magic(
+void xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic(
size_t mr,
size_t nc,
size_t kc,
@@ -41,7 +41,7 @@
c1 = c0;
}
- const int32_t vb_zero_point = params->fp32_scalar_magic.kernel_zero_point;
+ const int32_t vb_zero_point = params->fp32_scalar_fmagic.kernel_zero_point;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
@@ -85,7 +85,7 @@
float vfpacc1x2 = (float) vacc1x2;
float vfpacc1x3 = (float) vacc1x3;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_fmagic.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc0x2 *= vscale;
@@ -95,7 +95,7 @@
vfpacc1x2 *= vscale;
vfpacc1x3 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
@@ -105,7 +105,7 @@
vfpacc1x2 = math_max_f32(vfpacc1x2, voutput_min_less_zero_point);
vfpacc1x3 = math_max_f32(vfpacc1x3, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
@@ -115,7 +115,7 @@
vfpacc1x2 = math_min_f32(vfpacc1x2, voutput_max_less_zero_point);
vfpacc1x3 = math_min_f32(vfpacc1x3, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
+ const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc0x2 += vmagic_bias;
@@ -125,7 +125,7 @@
vfpacc1x2 += vmagic_bias;
vfpacc1x3 += vmagic_bias;
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
+ const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
int32_t vout0x2 = (int32_t) fp32_to_bits(vfpacc0x2) - vmagic_bias_less_output_zero_point;
diff --git a/src/qu8-gemm/gen/2x4-minmax-fp32-scalar-lrint.c b/src/qu8-gemm/gen/2x4-minmax-fp32-scalar-lrint.c
deleted file mode 100644
index 6a6567c..0000000
--- a/src/qu8-gemm/gen/2x4-minmax-fp32-scalar-lrint.c
+++ /dev/null
@@ -1,172 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-gemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-#include <math.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint(
- size_t mr,
- size_t nc,
- size_t kc,
- const uint8_t* restrict a,
- size_t a_stride,
- const void* restrict w,
- uint8_t* restrict c,
- size_t cm_stride,
- size_t cn_stride,
- const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 2);
- assert(nc != 0);
- assert(kc != 0);
-
- const uint8_t* a0 = a;
- uint8_t* c0 = c;
- const uint8_t* a1 = (const uint8_t*) ((uintptr_t) a0 + a_stride);
- uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
- if XNN_UNPREDICTABLE(mr != 2) {
- a1 = a0;
- c1 = c0;
- }
-
- const int32_t vb_zero_point = params->fp32_scalar_lrint.kernel_zero_point;
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- int32_t vacc0x2 = ((const int32_t*) w)[2];
- int32_t vacc0x3 = ((const int32_t*) w)[3];
- int32_t vacc1x0 = vacc0x0;
- int32_t vacc1x1 = vacc0x1;
- int32_t vacc1x2 = vacc0x2;
- int32_t vacc1x3 = vacc0x3;
- w = (const void*) ((const int32_t*) w + 4);
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) (uint32_t) *a0++;
- const int32_t va1 = (int32_t) (uint32_t) *a1++;
-
- const int32_t vb0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vb_zero_point;
- const int32_t vb1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vb_zero_point;
- const int32_t vb2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vb_zero_point;
- const int32_t vb3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vb_zero_point;
- w = (const void*) ((const uint8_t*) w + 4);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
- vacc0x2 += va0 * vb2;
- vacc0x3 += va0 * vb3;
- vacc1x0 += va1 * vb0;
- vacc1x1 += va1 * vb1;
- vacc1x2 += va1 * vb2;
- vacc1x3 += va1 * vb3;
-
- k -= sizeof(uint8_t);
- } while (k != 0);
-
- float vfpacc0x0 = (float) vacc0x0;
- float vfpacc0x1 = (float) vacc0x1;
- float vfpacc0x2 = (float) vacc0x2;
- float vfpacc0x3 = (float) vacc0x3;
- float vfpacc1x0 = (float) vacc1x0;
- float vfpacc1x1 = (float) vacc1x1;
- float vfpacc1x2 = (float) vacc1x2;
- float vfpacc1x3 = (float) vacc1x3;
-
- const float vscale = params->fp32_scalar_lrint.scale;
- vfpacc0x0 *= vscale;
- vfpacc0x1 *= vscale;
- vfpacc0x2 *= vscale;
- vfpacc0x3 *= vscale;
- vfpacc1x0 *= vscale;
- vfpacc1x1 *= vscale;
- vfpacc1x2 *= vscale;
- vfpacc1x3 *= vscale;
-
- long vrndacc0x0 = lrintf(vfpacc0x0);
- long vrndacc0x1 = lrintf(vfpacc0x1);
- long vrndacc0x2 = lrintf(vfpacc0x2);
- long vrndacc0x3 = lrintf(vfpacc0x3);
- long vrndacc1x0 = lrintf(vfpacc1x0);
- long vrndacc1x1 = lrintf(vfpacc1x1);
- long vrndacc1x2 = lrintf(vfpacc1x2);
- long vrndacc1x3 = lrintf(vfpacc1x3);
-
- const long voutput_min_less_zero_point = params->fp32_scalar_lrint.output_min_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x1;
- vrndacc0x2 = XNN_UNPREDICTABLE(vrndacc0x2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x2;
- vrndacc0x3 = XNN_UNPREDICTABLE(vrndacc0x3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x3;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x1;
- vrndacc1x2 = XNN_UNPREDICTABLE(vrndacc1x2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x2;
- vrndacc1x3 = XNN_UNPREDICTABLE(vrndacc1x3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x3;
-
- const long voutput_max_less_zero_point = params->fp32_scalar_lrint.output_max_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x1;
- vrndacc0x2 = XNN_UNPREDICTABLE(vrndacc0x2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x2;
- vrndacc0x3 = XNN_UNPREDICTABLE(vrndacc0x3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x3;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x1;
- vrndacc1x2 = XNN_UNPREDICTABLE(vrndacc1x2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x2;
- vrndacc1x3 = XNN_UNPREDICTABLE(vrndacc1x3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x3;
-
- const int32_t voutput_zero_point = params->fp32_scalar_lrint.output_zero_point;
- int32_t vout0x0 = (int32_t) vrndacc0x0 + voutput_zero_point;
- int32_t vout0x1 = (int32_t) vrndacc0x1 + voutput_zero_point;
- int32_t vout0x2 = (int32_t) vrndacc0x2 + voutput_zero_point;
- int32_t vout0x3 = (int32_t) vrndacc0x3 + voutput_zero_point;
- int32_t vout1x0 = (int32_t) vrndacc1x0 + voutput_zero_point;
- int32_t vout1x1 = (int32_t) vrndacc1x1 + voutput_zero_point;
- int32_t vout1x2 = (int32_t) vrndacc1x2 + voutput_zero_point;
- int32_t vout1x3 = (int32_t) vrndacc1x3 + voutput_zero_point;
-
- if XNN_LIKELY(nc >= 4) {
- c0[0] = (uint8_t) vout0x0;
- c0[1] = (uint8_t) vout0x1;
- c0[2] = (uint8_t) vout0x2;
- c0[3] = (uint8_t) vout0x3;
- c1[0] = (uint8_t) vout1x0;
- c1[1] = (uint8_t) vout1x1;
- c1[2] = (uint8_t) vout1x2;
- c1[3] = (uint8_t) vout1x3;
-
- a0 = (const uint8_t*) ((uintptr_t) a0 - kc);
- a1 = (const uint8_t*) ((uintptr_t) a1 - kc);
-
- c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
- c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
-
- nc -= 4;
- } else {
- if (nc & 2) {
- c0[0] = (uint8_t) vout0x0;
- c0[1] = (uint8_t) vout0x1;
- vout0x0 = vout0x2;
- c0 += 2;
- c1[0] = (uint8_t) vout1x0;
- c1[1] = (uint8_t) vout1x1;
- vout1x0 = vout1x2;
- c1 += 2;
- }
- if (nc & 1) {
- c0[0] = (uint8_t) vout0x0;
- c1[0] = (uint8_t) vout1x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qu8-gemm/gen/2x4-minmax-fp32-scalar-magic.c b/src/qu8-gemm/gen/2x4-minmax-fp32-scalar-lrintf.c
similarity index 76%
copy from src/qu8-gemm/gen/2x4-minmax-fp32-scalar-magic.c
copy to src/qu8-gemm/gen/2x4-minmax-fp32-scalar-lrintf.c
index e26ea58..6192f65 100644
--- a/src/qu8-gemm/gen/2x4-minmax-fp32-scalar-magic.c
+++ b/src/qu8-gemm/gen/2x4-minmax-fp32-scalar-lrintf.c
@@ -8,14 +8,13 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-
-#include <fp16.h>
+#include <math.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
-void xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_magic(
+void xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
@@ -41,7 +40,7 @@
c1 = c0;
}
- const int32_t vb_zero_point = params->fp32_scalar_magic.kernel_zero_point;
+ const int32_t vb_zero_point = params->fp32_scalar_lrintf.kernel_zero_point;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
@@ -85,7 +84,7 @@
float vfpacc1x2 = (float) vacc1x2;
float vfpacc1x3 = (float) vacc1x3;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_lrintf.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc0x2 *= vscale;
@@ -95,7 +94,7 @@
vfpacc1x2 *= vscale;
vfpacc1x3 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
@@ -105,7 +104,7 @@
vfpacc1x2 = math_max_f32(vfpacc1x2, voutput_min_less_zero_point);
vfpacc1x3 = math_max_f32(vfpacc1x3, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
@@ -115,25 +114,24 @@
vfpacc1x2 = math_min_f32(vfpacc1x2, voutput_max_less_zero_point);
vfpacc1x3 = math_min_f32(vfpacc1x3, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
- vfpacc0x0 += vmagic_bias;
- vfpacc0x1 += vmagic_bias;
- vfpacc0x2 += vmagic_bias;
- vfpacc0x3 += vmagic_bias;
- vfpacc1x0 += vmagic_bias;
- vfpacc1x1 += vmagic_bias;
- vfpacc1x2 += vmagic_bias;
- vfpacc1x3 += vmagic_bias;
+ const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
+ const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
+ const int32_t vrndacc0x2 = (int32_t) lrintf(vfpacc0x2);
+ const int32_t vrndacc0x3 = (int32_t) lrintf(vfpacc0x3);
+ const int32_t vrndacc1x0 = (int32_t) lrintf(vfpacc1x0);
+ const int32_t vrndacc1x1 = (int32_t) lrintf(vfpacc1x1);
+ const int32_t vrndacc1x2 = (int32_t) lrintf(vfpacc1x2);
+ const int32_t vrndacc1x3 = (int32_t) lrintf(vfpacc1x3);
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
- int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
- int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
- int32_t vout0x2 = (int32_t) fp32_to_bits(vfpacc0x2) - vmagic_bias_less_output_zero_point;
- int32_t vout0x3 = (int32_t) fp32_to_bits(vfpacc0x3) - vmagic_bias_less_output_zero_point;
- int32_t vout1x0 = (int32_t) fp32_to_bits(vfpacc1x0) - vmagic_bias_less_output_zero_point;
- int32_t vout1x1 = (int32_t) fp32_to_bits(vfpacc1x1) - vmagic_bias_less_output_zero_point;
- int32_t vout1x2 = (int32_t) fp32_to_bits(vfpacc1x2) - vmagic_bias_less_output_zero_point;
- int32_t vout1x3 = (int32_t) fp32_to_bits(vfpacc1x3) - vmagic_bias_less_output_zero_point;
+ const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
+ int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
+ int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
+ int32_t vout0x2 = vrndacc0x2 + voutput_zero_point;
+ int32_t vout0x3 = vrndacc0x3 + voutput_zero_point;
+ int32_t vout1x0 = vrndacc1x0 + voutput_zero_point;
+ int32_t vout1x1 = vrndacc1x1 + voutput_zero_point;
+ int32_t vout1x2 = vrndacc1x2 + voutput_zero_point;
+ int32_t vout1x3 = vrndacc1x3 + voutput_zero_point;
if XNN_LIKELY(nc >= 4) {
c0[0] = (uint8_t) vout0x0;
diff --git a/src/qu8-gemm/gen/3x2-minmax-fp32-scalar-magic.c b/src/qu8-gemm/gen/3x2-minmax-fp32-scalar-fmagic.c
similarity index 92%
rename from src/qu8-gemm/gen/3x2-minmax-fp32-scalar-magic.c
rename to src/qu8-gemm/gen/3x2-minmax-fp32-scalar-fmagic.c
index bcc7787..7533e9c 100644
--- a/src/qu8-gemm/gen/3x2-minmax-fp32-scalar-magic.c
+++ b/src/qu8-gemm/gen/3x2-minmax-fp32-scalar-fmagic.c
@@ -15,7 +15,7 @@
#include <xnnpack/gemm.h>
-void xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_magic(
+void xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic(
size_t mr,
size_t nc,
size_t kc,
@@ -47,7 +47,7 @@
c2 = c1;
}
- const int32_t vb_zero_point = params->fp32_scalar_magic.kernel_zero_point;
+ const int32_t vb_zero_point = params->fp32_scalar_fmagic.kernel_zero_point;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
@@ -84,7 +84,7 @@
float vfpacc2x0 = (float) vacc2x0;
float vfpacc2x1 = (float) vacc2x1;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_fmagic.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc1x0 *= vscale;
@@ -92,7 +92,7 @@
vfpacc2x0 *= vscale;
vfpacc2x1 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point);
@@ -100,7 +100,7 @@
vfpacc2x0 = math_max_f32(vfpacc2x0, voutput_min_less_zero_point);
vfpacc2x1 = math_max_f32(vfpacc2x1, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point);
@@ -108,7 +108,7 @@
vfpacc2x0 = math_min_f32(vfpacc2x0, voutput_max_less_zero_point);
vfpacc2x1 = math_min_f32(vfpacc2x1, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
+ const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc1x0 += vmagic_bias;
@@ -116,7 +116,7 @@
vfpacc2x0 += vmagic_bias;
vfpacc2x1 += vmagic_bias;
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
+ const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
int32_t vout1x0 = (int32_t) fp32_to_bits(vfpacc1x0) - vmagic_bias_less_output_zero_point;
diff --git a/src/qu8-gemm/gen/3x2-minmax-fp32-scalar-lrint.c b/src/qu8-gemm/gen/3x2-minmax-fp32-scalar-lrint.c
deleted file mode 100644
index 92f0dac..0000000
--- a/src/qu8-gemm/gen/3x2-minmax-fp32-scalar-lrint.c
+++ /dev/null
@@ -1,152 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-gemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-#include <math.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint(
- size_t mr,
- size_t nc,
- size_t kc,
- const uint8_t* restrict a,
- size_t a_stride,
- const void* restrict w,
- uint8_t* restrict c,
- size_t cm_stride,
- size_t cn_stride,
- const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 3);
- assert(nc != 0);
- assert(kc != 0);
-
- const uint8_t* a0 = a;
- uint8_t* c0 = c;
- const uint8_t* a1 = (const uint8_t*) ((uintptr_t) a0 + a_stride);
- uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
- if XNN_UNPREDICTABLE(mr < 2) {
- a1 = a0;
- c1 = c0;
- }
- const uint8_t* a2 = (const uint8_t*) ((uintptr_t) a1 + a_stride);
- uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
- if XNN_UNPREDICTABLE(mr <= 2) {
- a2 = a1;
- c2 = c1;
- }
-
- const int32_t vb_zero_point = params->fp32_scalar_lrint.kernel_zero_point;
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- int32_t vacc1x0 = vacc0x0;
- int32_t vacc1x1 = vacc0x1;
- int32_t vacc2x0 = vacc0x0;
- int32_t vacc2x1 = vacc0x1;
- w = (const void*) ((const int32_t*) w + 2);
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) (uint32_t) *a0++;
- const int32_t va1 = (int32_t) (uint32_t) *a1++;
- const int32_t va2 = (int32_t) (uint32_t) *a2++;
-
- const int32_t vb0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vb_zero_point;
- const int32_t vb1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vb_zero_point;
- w = (const void*) ((const uint8_t*) w + 2);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
- vacc1x0 += va1 * vb0;
- vacc1x1 += va1 * vb1;
- vacc2x0 += va2 * vb0;
- vacc2x1 += va2 * vb1;
-
- k -= sizeof(uint8_t);
- } while (k != 0);
-
- float vfpacc0x0 = (float) vacc0x0;
- float vfpacc0x1 = (float) vacc0x1;
- float vfpacc1x0 = (float) vacc1x0;
- float vfpacc1x1 = (float) vacc1x1;
- float vfpacc2x0 = (float) vacc2x0;
- float vfpacc2x1 = (float) vacc2x1;
-
- const float vscale = params->fp32_scalar_lrint.scale;
- vfpacc0x0 *= vscale;
- vfpacc0x1 *= vscale;
- vfpacc1x0 *= vscale;
- vfpacc1x1 *= vscale;
- vfpacc2x0 *= vscale;
- vfpacc2x1 *= vscale;
-
- long vrndacc0x0 = lrintf(vfpacc0x0);
- long vrndacc0x1 = lrintf(vfpacc0x1);
- long vrndacc1x0 = lrintf(vfpacc1x0);
- long vrndacc1x1 = lrintf(vfpacc1x1);
- long vrndacc2x0 = lrintf(vfpacc2x0);
- long vrndacc2x1 = lrintf(vfpacc2x1);
-
- const long voutput_min_less_zero_point = params->fp32_scalar_lrint.output_min_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x1;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x1;
- vrndacc2x0 = XNN_UNPREDICTABLE(vrndacc2x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x0;
- vrndacc2x1 = XNN_UNPREDICTABLE(vrndacc2x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x1;
-
- const long voutput_max_less_zero_point = params->fp32_scalar_lrint.output_max_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x1;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x1;
- vrndacc2x0 = XNN_UNPREDICTABLE(vrndacc2x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x0;
- vrndacc2x1 = XNN_UNPREDICTABLE(vrndacc2x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x1;
-
- const int32_t voutput_zero_point = params->fp32_scalar_lrint.output_zero_point;
- int32_t vout0x0 = (int32_t) vrndacc0x0 + voutput_zero_point;
- int32_t vout0x1 = (int32_t) vrndacc0x1 + voutput_zero_point;
- int32_t vout1x0 = (int32_t) vrndacc1x0 + voutput_zero_point;
- int32_t vout1x1 = (int32_t) vrndacc1x1 + voutput_zero_point;
- int32_t vout2x0 = (int32_t) vrndacc2x0 + voutput_zero_point;
- int32_t vout2x1 = (int32_t) vrndacc2x1 + voutput_zero_point;
-
- if XNN_LIKELY(nc >= 2) {
- c0[0] = (uint8_t) vout0x0;
- c0[1] = (uint8_t) vout0x1;
- c1[0] = (uint8_t) vout1x0;
- c1[1] = (uint8_t) vout1x1;
- c2[0] = (uint8_t) vout2x0;
- c2[1] = (uint8_t) vout2x1;
-
- a0 = (const uint8_t*) ((uintptr_t) a0 - kc);
- a1 = (const uint8_t*) ((uintptr_t) a1 - kc);
- a2 = (const uint8_t*) ((uintptr_t) a2 - kc);
-
- c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
- c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
- c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
-
- nc -= 2;
- } else {
- if (nc & 1) {
- c0[0] = (uint8_t) vout0x0;
- c1[0] = (uint8_t) vout1x0;
- c2[0] = (uint8_t) vout2x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qu8-gemm/gen/3x2-minmax-fp32-scalar-magic.c b/src/qu8-gemm/gen/3x2-minmax-fp32-scalar-lrintf.c
similarity index 77%
copy from src/qu8-gemm/gen/3x2-minmax-fp32-scalar-magic.c
copy to src/qu8-gemm/gen/3x2-minmax-fp32-scalar-lrintf.c
index bcc7787..60f2ee6 100644
--- a/src/qu8-gemm/gen/3x2-minmax-fp32-scalar-magic.c
+++ b/src/qu8-gemm/gen/3x2-minmax-fp32-scalar-lrintf.c
@@ -8,14 +8,13 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-
-#include <fp16.h>
+#include <math.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
-void xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_magic(
+void xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
@@ -47,7 +46,7 @@
c2 = c1;
}
- const int32_t vb_zero_point = params->fp32_scalar_magic.kernel_zero_point;
+ const int32_t vb_zero_point = params->fp32_scalar_lrintf.kernel_zero_point;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
@@ -84,7 +83,7 @@
float vfpacc2x0 = (float) vacc2x0;
float vfpacc2x1 = (float) vacc2x1;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_lrintf.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc1x0 *= vscale;
@@ -92,7 +91,7 @@
vfpacc2x0 *= vscale;
vfpacc2x1 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point);
@@ -100,7 +99,7 @@
vfpacc2x0 = math_max_f32(vfpacc2x0, voutput_min_less_zero_point);
vfpacc2x1 = math_max_f32(vfpacc2x1, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point);
@@ -108,21 +107,20 @@
vfpacc2x0 = math_min_f32(vfpacc2x0, voutput_max_less_zero_point);
vfpacc2x1 = math_min_f32(vfpacc2x1, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
- vfpacc0x0 += vmagic_bias;
- vfpacc0x1 += vmagic_bias;
- vfpacc1x0 += vmagic_bias;
- vfpacc1x1 += vmagic_bias;
- vfpacc2x0 += vmagic_bias;
- vfpacc2x1 += vmagic_bias;
+ const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
+ const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
+ const int32_t vrndacc1x0 = (int32_t) lrintf(vfpacc1x0);
+ const int32_t vrndacc1x1 = (int32_t) lrintf(vfpacc1x1);
+ const int32_t vrndacc2x0 = (int32_t) lrintf(vfpacc2x0);
+ const int32_t vrndacc2x1 = (int32_t) lrintf(vfpacc2x1);
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
- int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
- int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
- int32_t vout1x0 = (int32_t) fp32_to_bits(vfpacc1x0) - vmagic_bias_less_output_zero_point;
- int32_t vout1x1 = (int32_t) fp32_to_bits(vfpacc1x1) - vmagic_bias_less_output_zero_point;
- int32_t vout2x0 = (int32_t) fp32_to_bits(vfpacc2x0) - vmagic_bias_less_output_zero_point;
- int32_t vout2x1 = (int32_t) fp32_to_bits(vfpacc2x1) - vmagic_bias_less_output_zero_point;
+ const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
+ int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
+ int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
+ int32_t vout1x0 = vrndacc1x0 + voutput_zero_point;
+ int32_t vout1x1 = vrndacc1x1 + voutput_zero_point;
+ int32_t vout2x0 = vrndacc2x0 + voutput_zero_point;
+ int32_t vout2x1 = vrndacc2x1 + voutput_zero_point;
if XNN_LIKELY(nc >= 2) {
c0[0] = (uint8_t) vout0x0;
diff --git a/src/qu8-gemm/gen/3x4-minmax-fp32-scalar-magic.c b/src/qu8-gemm/gen/3x4-minmax-fp32-scalar-fmagic.c
similarity index 95%
rename from src/qu8-gemm/gen/3x4-minmax-fp32-scalar-magic.c
rename to src/qu8-gemm/gen/3x4-minmax-fp32-scalar-fmagic.c
index 0c8926a..2c7046d 100644
--- a/src/qu8-gemm/gen/3x4-minmax-fp32-scalar-magic.c
+++ b/src/qu8-gemm/gen/3x4-minmax-fp32-scalar-fmagic.c
@@ -15,7 +15,7 @@
#include <xnnpack/gemm.h>
-void xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_magic(
+void xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic(
size_t mr,
size_t nc,
size_t kc,
@@ -47,7 +47,7 @@
c2 = c1;
}
- const int32_t vb_zero_point = params->fp32_scalar_magic.kernel_zero_point;
+ const int32_t vb_zero_point = params->fp32_scalar_fmagic.kernel_zero_point;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
@@ -104,7 +104,7 @@
float vfpacc2x2 = (float) vacc2x2;
float vfpacc2x3 = (float) vacc2x3;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_fmagic.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc0x2 *= vscale;
@@ -118,7 +118,7 @@
vfpacc2x2 *= vscale;
vfpacc2x3 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
@@ -132,7 +132,7 @@
vfpacc2x2 = math_max_f32(vfpacc2x2, voutput_min_less_zero_point);
vfpacc2x3 = math_max_f32(vfpacc2x3, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
@@ -146,7 +146,7 @@
vfpacc2x2 = math_min_f32(vfpacc2x2, voutput_max_less_zero_point);
vfpacc2x3 = math_min_f32(vfpacc2x3, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
+ const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc0x2 += vmagic_bias;
@@ -160,7 +160,7 @@
vfpacc2x2 += vmagic_bias;
vfpacc2x3 += vmagic_bias;
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
+ const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
int32_t vout0x2 = (int32_t) fp32_to_bits(vfpacc0x2) - vmagic_bias_less_output_zero_point;
diff --git a/src/qu8-gemm/gen/3x4-minmax-fp32-scalar-lrint.c b/src/qu8-gemm/gen/3x4-minmax-fp32-scalar-lrint.c
deleted file mode 100644
index 6523cdf..0000000
--- a/src/qu8-gemm/gen/3x4-minmax-fp32-scalar-lrint.c
+++ /dev/null
@@ -1,222 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-gemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-#include <math.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint(
- size_t mr,
- size_t nc,
- size_t kc,
- const uint8_t* restrict a,
- size_t a_stride,
- const void* restrict w,
- uint8_t* restrict c,
- size_t cm_stride,
- size_t cn_stride,
- const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 3);
- assert(nc != 0);
- assert(kc != 0);
-
- const uint8_t* a0 = a;
- uint8_t* c0 = c;
- const uint8_t* a1 = (const uint8_t*) ((uintptr_t) a0 + a_stride);
- uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
- if XNN_UNPREDICTABLE(mr < 2) {
- a1 = a0;
- c1 = c0;
- }
- const uint8_t* a2 = (const uint8_t*) ((uintptr_t) a1 + a_stride);
- uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
- if XNN_UNPREDICTABLE(mr <= 2) {
- a2 = a1;
- c2 = c1;
- }
-
- const int32_t vb_zero_point = params->fp32_scalar_lrint.kernel_zero_point;
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- int32_t vacc0x2 = ((const int32_t*) w)[2];
- int32_t vacc0x3 = ((const int32_t*) w)[3];
- int32_t vacc1x0 = vacc0x0;
- int32_t vacc1x1 = vacc0x1;
- int32_t vacc1x2 = vacc0x2;
- int32_t vacc1x3 = vacc0x3;
- int32_t vacc2x0 = vacc0x0;
- int32_t vacc2x1 = vacc0x1;
- int32_t vacc2x2 = vacc0x2;
- int32_t vacc2x3 = vacc0x3;
- w = (const void*) ((const int32_t*) w + 4);
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) (uint32_t) *a0++;
- const int32_t va1 = (int32_t) (uint32_t) *a1++;
- const int32_t va2 = (int32_t) (uint32_t) *a2++;
-
- const int32_t vb0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vb_zero_point;
- const int32_t vb1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vb_zero_point;
- const int32_t vb2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vb_zero_point;
- const int32_t vb3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vb_zero_point;
- w = (const void*) ((const uint8_t*) w + 4);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
- vacc0x2 += va0 * vb2;
- vacc0x3 += va0 * vb3;
- vacc1x0 += va1 * vb0;
- vacc1x1 += va1 * vb1;
- vacc1x2 += va1 * vb2;
- vacc1x3 += va1 * vb3;
- vacc2x0 += va2 * vb0;
- vacc2x1 += va2 * vb1;
- vacc2x2 += va2 * vb2;
- vacc2x3 += va2 * vb3;
-
- k -= sizeof(uint8_t);
- } while (k != 0);
-
- float vfpacc0x0 = (float) vacc0x0;
- float vfpacc0x1 = (float) vacc0x1;
- float vfpacc0x2 = (float) vacc0x2;
- float vfpacc0x3 = (float) vacc0x3;
- float vfpacc1x0 = (float) vacc1x0;
- float vfpacc1x1 = (float) vacc1x1;
- float vfpacc1x2 = (float) vacc1x2;
- float vfpacc1x3 = (float) vacc1x3;
- float vfpacc2x0 = (float) vacc2x0;
- float vfpacc2x1 = (float) vacc2x1;
- float vfpacc2x2 = (float) vacc2x2;
- float vfpacc2x3 = (float) vacc2x3;
-
- const float vscale = params->fp32_scalar_lrint.scale;
- vfpacc0x0 *= vscale;
- vfpacc0x1 *= vscale;
- vfpacc0x2 *= vscale;
- vfpacc0x3 *= vscale;
- vfpacc1x0 *= vscale;
- vfpacc1x1 *= vscale;
- vfpacc1x2 *= vscale;
- vfpacc1x3 *= vscale;
- vfpacc2x0 *= vscale;
- vfpacc2x1 *= vscale;
- vfpacc2x2 *= vscale;
- vfpacc2x3 *= vscale;
-
- long vrndacc0x0 = lrintf(vfpacc0x0);
- long vrndacc0x1 = lrintf(vfpacc0x1);
- long vrndacc0x2 = lrintf(vfpacc0x2);
- long vrndacc0x3 = lrintf(vfpacc0x3);
- long vrndacc1x0 = lrintf(vfpacc1x0);
- long vrndacc1x1 = lrintf(vfpacc1x1);
- long vrndacc1x2 = lrintf(vfpacc1x2);
- long vrndacc1x3 = lrintf(vfpacc1x3);
- long vrndacc2x0 = lrintf(vfpacc2x0);
- long vrndacc2x1 = lrintf(vfpacc2x1);
- long vrndacc2x2 = lrintf(vfpacc2x2);
- long vrndacc2x3 = lrintf(vfpacc2x3);
-
- const long voutput_min_less_zero_point = params->fp32_scalar_lrint.output_min_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x1;
- vrndacc0x2 = XNN_UNPREDICTABLE(vrndacc0x2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x2;
- vrndacc0x3 = XNN_UNPREDICTABLE(vrndacc0x3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x3;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x1;
- vrndacc1x2 = XNN_UNPREDICTABLE(vrndacc1x2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x2;
- vrndacc1x3 = XNN_UNPREDICTABLE(vrndacc1x3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x3;
- vrndacc2x0 = XNN_UNPREDICTABLE(vrndacc2x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x0;
- vrndacc2x1 = XNN_UNPREDICTABLE(vrndacc2x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x1;
- vrndacc2x2 = XNN_UNPREDICTABLE(vrndacc2x2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x2;
- vrndacc2x3 = XNN_UNPREDICTABLE(vrndacc2x3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x3;
-
- const long voutput_max_less_zero_point = params->fp32_scalar_lrint.output_max_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x1;
- vrndacc0x2 = XNN_UNPREDICTABLE(vrndacc0x2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x2;
- vrndacc0x3 = XNN_UNPREDICTABLE(vrndacc0x3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x3;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x1;
- vrndacc1x2 = XNN_UNPREDICTABLE(vrndacc1x2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x2;
- vrndacc1x3 = XNN_UNPREDICTABLE(vrndacc1x3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x3;
- vrndacc2x0 = XNN_UNPREDICTABLE(vrndacc2x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x0;
- vrndacc2x1 = XNN_UNPREDICTABLE(vrndacc2x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x1;
- vrndacc2x2 = XNN_UNPREDICTABLE(vrndacc2x2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x2;
- vrndacc2x3 = XNN_UNPREDICTABLE(vrndacc2x3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x3;
-
- const int32_t voutput_zero_point = params->fp32_scalar_lrint.output_zero_point;
- int32_t vout0x0 = (int32_t) vrndacc0x0 + voutput_zero_point;
- int32_t vout0x1 = (int32_t) vrndacc0x1 + voutput_zero_point;
- int32_t vout0x2 = (int32_t) vrndacc0x2 + voutput_zero_point;
- int32_t vout0x3 = (int32_t) vrndacc0x3 + voutput_zero_point;
- int32_t vout1x0 = (int32_t) vrndacc1x0 + voutput_zero_point;
- int32_t vout1x1 = (int32_t) vrndacc1x1 + voutput_zero_point;
- int32_t vout1x2 = (int32_t) vrndacc1x2 + voutput_zero_point;
- int32_t vout1x3 = (int32_t) vrndacc1x3 + voutput_zero_point;
- int32_t vout2x0 = (int32_t) vrndacc2x0 + voutput_zero_point;
- int32_t vout2x1 = (int32_t) vrndacc2x1 + voutput_zero_point;
- int32_t vout2x2 = (int32_t) vrndacc2x2 + voutput_zero_point;
- int32_t vout2x3 = (int32_t) vrndacc2x3 + voutput_zero_point;
-
- if XNN_LIKELY(nc >= 4) {
- c0[0] = (uint8_t) vout0x0;
- c0[1] = (uint8_t) vout0x1;
- c0[2] = (uint8_t) vout0x2;
- c0[3] = (uint8_t) vout0x3;
- c1[0] = (uint8_t) vout1x0;
- c1[1] = (uint8_t) vout1x1;
- c1[2] = (uint8_t) vout1x2;
- c1[3] = (uint8_t) vout1x3;
- c2[0] = (uint8_t) vout2x0;
- c2[1] = (uint8_t) vout2x1;
- c2[2] = (uint8_t) vout2x2;
- c2[3] = (uint8_t) vout2x3;
-
- a0 = (const uint8_t*) ((uintptr_t) a0 - kc);
- a1 = (const uint8_t*) ((uintptr_t) a1 - kc);
- a2 = (const uint8_t*) ((uintptr_t) a2 - kc);
-
- c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
- c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
- c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
-
- nc -= 4;
- } else {
- if (nc & 2) {
- c0[0] = (uint8_t) vout0x0;
- c0[1] = (uint8_t) vout0x1;
- vout0x0 = vout0x2;
- c0 += 2;
- c1[0] = (uint8_t) vout1x0;
- c1[1] = (uint8_t) vout1x1;
- vout1x0 = vout1x2;
- c1 += 2;
- c2[0] = (uint8_t) vout2x0;
- c2[1] = (uint8_t) vout2x1;
- vout2x0 = vout2x2;
- c2 += 2;
- }
- if (nc & 1) {
- c0[0] = (uint8_t) vout0x0;
- c1[0] = (uint8_t) vout1x0;
- c2[0] = (uint8_t) vout2x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qu8-gemm/gen/3x4-minmax-fp32-scalar-magic.c b/src/qu8-gemm/gen/3x4-minmax-fp32-scalar-lrintf.c
similarity index 76%
copy from src/qu8-gemm/gen/3x4-minmax-fp32-scalar-magic.c
copy to src/qu8-gemm/gen/3x4-minmax-fp32-scalar-lrintf.c
index 0c8926a..779963c 100644
--- a/src/qu8-gemm/gen/3x4-minmax-fp32-scalar-magic.c
+++ b/src/qu8-gemm/gen/3x4-minmax-fp32-scalar-lrintf.c
@@ -8,14 +8,13 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-
-#include <fp16.h>
+#include <math.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
-void xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_magic(
+void xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
@@ -47,7 +46,7 @@
c2 = c1;
}
- const int32_t vb_zero_point = params->fp32_scalar_magic.kernel_zero_point;
+ const int32_t vb_zero_point = params->fp32_scalar_lrintf.kernel_zero_point;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
@@ -104,7 +103,7 @@
float vfpacc2x2 = (float) vacc2x2;
float vfpacc2x3 = (float) vacc2x3;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_lrintf.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc0x2 *= vscale;
@@ -118,7 +117,7 @@
vfpacc2x2 *= vscale;
vfpacc2x3 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
@@ -132,7 +131,7 @@
vfpacc2x2 = math_max_f32(vfpacc2x2, voutput_min_less_zero_point);
vfpacc2x3 = math_max_f32(vfpacc2x3, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
@@ -146,33 +145,32 @@
vfpacc2x2 = math_min_f32(vfpacc2x2, voutput_max_less_zero_point);
vfpacc2x3 = math_min_f32(vfpacc2x3, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
- vfpacc0x0 += vmagic_bias;
- vfpacc0x1 += vmagic_bias;
- vfpacc0x2 += vmagic_bias;
- vfpacc0x3 += vmagic_bias;
- vfpacc1x0 += vmagic_bias;
- vfpacc1x1 += vmagic_bias;
- vfpacc1x2 += vmagic_bias;
- vfpacc1x3 += vmagic_bias;
- vfpacc2x0 += vmagic_bias;
- vfpacc2x1 += vmagic_bias;
- vfpacc2x2 += vmagic_bias;
- vfpacc2x3 += vmagic_bias;
+ const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
+ const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
+ const int32_t vrndacc0x2 = (int32_t) lrintf(vfpacc0x2);
+ const int32_t vrndacc0x3 = (int32_t) lrintf(vfpacc0x3);
+ const int32_t vrndacc1x0 = (int32_t) lrintf(vfpacc1x0);
+ const int32_t vrndacc1x1 = (int32_t) lrintf(vfpacc1x1);
+ const int32_t vrndacc1x2 = (int32_t) lrintf(vfpacc1x2);
+ const int32_t vrndacc1x3 = (int32_t) lrintf(vfpacc1x3);
+ const int32_t vrndacc2x0 = (int32_t) lrintf(vfpacc2x0);
+ const int32_t vrndacc2x1 = (int32_t) lrintf(vfpacc2x1);
+ const int32_t vrndacc2x2 = (int32_t) lrintf(vfpacc2x2);
+ const int32_t vrndacc2x3 = (int32_t) lrintf(vfpacc2x3);
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
- int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
- int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
- int32_t vout0x2 = (int32_t) fp32_to_bits(vfpacc0x2) - vmagic_bias_less_output_zero_point;
- int32_t vout0x3 = (int32_t) fp32_to_bits(vfpacc0x3) - vmagic_bias_less_output_zero_point;
- int32_t vout1x0 = (int32_t) fp32_to_bits(vfpacc1x0) - vmagic_bias_less_output_zero_point;
- int32_t vout1x1 = (int32_t) fp32_to_bits(vfpacc1x1) - vmagic_bias_less_output_zero_point;
- int32_t vout1x2 = (int32_t) fp32_to_bits(vfpacc1x2) - vmagic_bias_less_output_zero_point;
- int32_t vout1x3 = (int32_t) fp32_to_bits(vfpacc1x3) - vmagic_bias_less_output_zero_point;
- int32_t vout2x0 = (int32_t) fp32_to_bits(vfpacc2x0) - vmagic_bias_less_output_zero_point;
- int32_t vout2x1 = (int32_t) fp32_to_bits(vfpacc2x1) - vmagic_bias_less_output_zero_point;
- int32_t vout2x2 = (int32_t) fp32_to_bits(vfpacc2x2) - vmagic_bias_less_output_zero_point;
- int32_t vout2x3 = (int32_t) fp32_to_bits(vfpacc2x3) - vmagic_bias_less_output_zero_point;
+ const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
+ int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
+ int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
+ int32_t vout0x2 = vrndacc0x2 + voutput_zero_point;
+ int32_t vout0x3 = vrndacc0x3 + voutput_zero_point;
+ int32_t vout1x0 = vrndacc1x0 + voutput_zero_point;
+ int32_t vout1x1 = vrndacc1x1 + voutput_zero_point;
+ int32_t vout1x2 = vrndacc1x2 + voutput_zero_point;
+ int32_t vout1x3 = vrndacc1x3 + voutput_zero_point;
+ int32_t vout2x0 = vrndacc2x0 + voutput_zero_point;
+ int32_t vout2x1 = vrndacc2x1 + voutput_zero_point;
+ int32_t vout2x2 = vrndacc2x2 + voutput_zero_point;
+ int32_t vout2x3 = vrndacc2x3 + voutput_zero_point;
if XNN_LIKELY(nc >= 4) {
c0[0] = (uint8_t) vout0x0;
diff --git a/src/qu8-gemm/gen/4x2-minmax-fp32-scalar-magic.c b/src/qu8-gemm/gen/4x2-minmax-fp32-scalar-fmagic.c
similarity index 93%
rename from src/qu8-gemm/gen/4x2-minmax-fp32-scalar-magic.c
rename to src/qu8-gemm/gen/4x2-minmax-fp32-scalar-fmagic.c
index 559f820..1e1dd5a 100644
--- a/src/qu8-gemm/gen/4x2-minmax-fp32-scalar-magic.c
+++ b/src/qu8-gemm/gen/4x2-minmax-fp32-scalar-fmagic.c
@@ -15,7 +15,7 @@
#include <xnnpack/gemm.h>
-void xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_magic(
+void xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic(
size_t mr,
size_t nc,
size_t kc,
@@ -53,7 +53,7 @@
c3 = c2;
}
- const int32_t vb_zero_point = params->fp32_scalar_magic.kernel_zero_point;
+ const int32_t vb_zero_point = params->fp32_scalar_fmagic.kernel_zero_point;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
@@ -97,7 +97,7 @@
float vfpacc3x0 = (float) vacc3x0;
float vfpacc3x1 = (float) vacc3x1;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_fmagic.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc1x0 *= vscale;
@@ -107,7 +107,7 @@
vfpacc3x0 *= vscale;
vfpacc3x1 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point);
@@ -117,7 +117,7 @@
vfpacc3x0 = math_max_f32(vfpacc3x0, voutput_min_less_zero_point);
vfpacc3x1 = math_max_f32(vfpacc3x1, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point);
@@ -127,7 +127,7 @@
vfpacc3x0 = math_min_f32(vfpacc3x0, voutput_max_less_zero_point);
vfpacc3x1 = math_min_f32(vfpacc3x1, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
+ const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc1x0 += vmagic_bias;
@@ -137,7 +137,7 @@
vfpacc3x0 += vmagic_bias;
vfpacc3x1 += vmagic_bias;
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
+ const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
int32_t vout1x0 = (int32_t) fp32_to_bits(vfpacc1x0) - vmagic_bias_less_output_zero_point;
diff --git a/src/qu8-gemm/gen/4x2-minmax-fp32-scalar-lrint.c b/src/qu8-gemm/gen/4x2-minmax-fp32-scalar-lrint.c
deleted file mode 100644
index f6f443b..0000000
--- a/src/qu8-gemm/gen/4x2-minmax-fp32-scalar-lrint.c
+++ /dev/null
@@ -1,180 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-gemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-#include <math.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint(
- size_t mr,
- size_t nc,
- size_t kc,
- const uint8_t* restrict a,
- size_t a_stride,
- const void* restrict w,
- uint8_t* restrict c,
- size_t cm_stride,
- size_t cn_stride,
- const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 4);
- assert(nc != 0);
- assert(kc != 0);
-
- const uint8_t* a0 = a;
- uint8_t* c0 = c;
- const uint8_t* a1 = (const uint8_t*) ((uintptr_t) a0 + a_stride);
- uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
- if XNN_UNPREDICTABLE(mr < 2) {
- a1 = a0;
- c1 = c0;
- }
- const uint8_t* a2 = (const uint8_t*) ((uintptr_t) a1 + a_stride);
- uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
- if XNN_UNPREDICTABLE(mr <= 2) {
- a2 = a1;
- c2 = c1;
- }
- const uint8_t* a3 = (const uint8_t*) ((uintptr_t) a2 + a_stride);
- uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
- if XNN_UNPREDICTABLE(mr != 4) {
- a3 = a2;
- c3 = c2;
- }
-
- const int32_t vb_zero_point = params->fp32_scalar_lrint.kernel_zero_point;
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- int32_t vacc1x0 = vacc0x0;
- int32_t vacc1x1 = vacc0x1;
- int32_t vacc2x0 = vacc0x0;
- int32_t vacc2x1 = vacc0x1;
- int32_t vacc3x0 = vacc0x0;
- int32_t vacc3x1 = vacc0x1;
- w = (const void*) ((const int32_t*) w + 2);
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) (uint32_t) *a0++;
- const int32_t va1 = (int32_t) (uint32_t) *a1++;
- const int32_t va2 = (int32_t) (uint32_t) *a2++;
- const int32_t va3 = (int32_t) (uint32_t) *a3++;
-
- const int32_t vb0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vb_zero_point;
- const int32_t vb1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vb_zero_point;
- w = (const void*) ((const uint8_t*) w + 2);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
- vacc1x0 += va1 * vb0;
- vacc1x1 += va1 * vb1;
- vacc2x0 += va2 * vb0;
- vacc2x1 += va2 * vb1;
- vacc3x0 += va3 * vb0;
- vacc3x1 += va3 * vb1;
-
- k -= sizeof(uint8_t);
- } while (k != 0);
-
- float vfpacc0x0 = (float) vacc0x0;
- float vfpacc0x1 = (float) vacc0x1;
- float vfpacc1x0 = (float) vacc1x0;
- float vfpacc1x1 = (float) vacc1x1;
- float vfpacc2x0 = (float) vacc2x0;
- float vfpacc2x1 = (float) vacc2x1;
- float vfpacc3x0 = (float) vacc3x0;
- float vfpacc3x1 = (float) vacc3x1;
-
- const float vscale = params->fp32_scalar_lrint.scale;
- vfpacc0x0 *= vscale;
- vfpacc0x1 *= vscale;
- vfpacc1x0 *= vscale;
- vfpacc1x1 *= vscale;
- vfpacc2x0 *= vscale;
- vfpacc2x1 *= vscale;
- vfpacc3x0 *= vscale;
- vfpacc3x1 *= vscale;
-
- long vrndacc0x0 = lrintf(vfpacc0x0);
- long vrndacc0x1 = lrintf(vfpacc0x1);
- long vrndacc1x0 = lrintf(vfpacc1x0);
- long vrndacc1x1 = lrintf(vfpacc1x1);
- long vrndacc2x0 = lrintf(vfpacc2x0);
- long vrndacc2x1 = lrintf(vfpacc2x1);
- long vrndacc3x0 = lrintf(vfpacc3x0);
- long vrndacc3x1 = lrintf(vfpacc3x1);
-
- const long voutput_min_less_zero_point = params->fp32_scalar_lrint.output_min_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x1;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x1;
- vrndacc2x0 = XNN_UNPREDICTABLE(vrndacc2x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x0;
- vrndacc2x1 = XNN_UNPREDICTABLE(vrndacc2x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x1;
- vrndacc3x0 = XNN_UNPREDICTABLE(vrndacc3x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc3x0;
- vrndacc3x1 = XNN_UNPREDICTABLE(vrndacc3x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc3x1;
-
- const long voutput_max_less_zero_point = params->fp32_scalar_lrint.output_max_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x1;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x1;
- vrndacc2x0 = XNN_UNPREDICTABLE(vrndacc2x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x0;
- vrndacc2x1 = XNN_UNPREDICTABLE(vrndacc2x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x1;
- vrndacc3x0 = XNN_UNPREDICTABLE(vrndacc3x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc3x0;
- vrndacc3x1 = XNN_UNPREDICTABLE(vrndacc3x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc3x1;
-
- const int32_t voutput_zero_point = params->fp32_scalar_lrint.output_zero_point;
- int32_t vout0x0 = (int32_t) vrndacc0x0 + voutput_zero_point;
- int32_t vout0x1 = (int32_t) vrndacc0x1 + voutput_zero_point;
- int32_t vout1x0 = (int32_t) vrndacc1x0 + voutput_zero_point;
- int32_t vout1x1 = (int32_t) vrndacc1x1 + voutput_zero_point;
- int32_t vout2x0 = (int32_t) vrndacc2x0 + voutput_zero_point;
- int32_t vout2x1 = (int32_t) vrndacc2x1 + voutput_zero_point;
- int32_t vout3x0 = (int32_t) vrndacc3x0 + voutput_zero_point;
- int32_t vout3x1 = (int32_t) vrndacc3x1 + voutput_zero_point;
-
- if XNN_LIKELY(nc >= 2) {
- c0[0] = (uint8_t) vout0x0;
- c0[1] = (uint8_t) vout0x1;
- c1[0] = (uint8_t) vout1x0;
- c1[1] = (uint8_t) vout1x1;
- c2[0] = (uint8_t) vout2x0;
- c2[1] = (uint8_t) vout2x1;
- c3[0] = (uint8_t) vout3x0;
- c3[1] = (uint8_t) vout3x1;
-
- a0 = (const uint8_t*) ((uintptr_t) a0 - kc);
- a1 = (const uint8_t*) ((uintptr_t) a1 - kc);
- a2 = (const uint8_t*) ((uintptr_t) a2 - kc);
- a3 = (const uint8_t*) ((uintptr_t) a3 - kc);
-
- c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
- c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
- c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
- c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);
-
- nc -= 2;
- } else {
- if (nc & 1) {
- c0[0] = (uint8_t) vout0x0;
- c1[0] = (uint8_t) vout1x0;
- c2[0] = (uint8_t) vout2x0;
- c3[0] = (uint8_t) vout3x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qu8-gemm/gen/4x2-minmax-fp32-scalar-magic.c b/src/qu8-gemm/gen/4x2-minmax-fp32-scalar-lrintf.c
similarity index 77%
copy from src/qu8-gemm/gen/4x2-minmax-fp32-scalar-magic.c
copy to src/qu8-gemm/gen/4x2-minmax-fp32-scalar-lrintf.c
index 559f820..94011f1 100644
--- a/src/qu8-gemm/gen/4x2-minmax-fp32-scalar-magic.c
+++ b/src/qu8-gemm/gen/4x2-minmax-fp32-scalar-lrintf.c
@@ -8,14 +8,13 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-
-#include <fp16.h>
+#include <math.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
-void xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_magic(
+void xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
@@ -53,7 +52,7 @@
c3 = c2;
}
- const int32_t vb_zero_point = params->fp32_scalar_magic.kernel_zero_point;
+ const int32_t vb_zero_point = params->fp32_scalar_lrintf.kernel_zero_point;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
@@ -97,7 +96,7 @@
float vfpacc3x0 = (float) vacc3x0;
float vfpacc3x1 = (float) vacc3x1;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_lrintf.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc1x0 *= vscale;
@@ -107,7 +106,7 @@
vfpacc3x0 *= vscale;
vfpacc3x1 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point);
@@ -117,7 +116,7 @@
vfpacc3x0 = math_max_f32(vfpacc3x0, voutput_min_less_zero_point);
vfpacc3x1 = math_max_f32(vfpacc3x1, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point);
@@ -127,25 +126,24 @@
vfpacc3x0 = math_min_f32(vfpacc3x0, voutput_max_less_zero_point);
vfpacc3x1 = math_min_f32(vfpacc3x1, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
- vfpacc0x0 += vmagic_bias;
- vfpacc0x1 += vmagic_bias;
- vfpacc1x0 += vmagic_bias;
- vfpacc1x1 += vmagic_bias;
- vfpacc2x0 += vmagic_bias;
- vfpacc2x1 += vmagic_bias;
- vfpacc3x0 += vmagic_bias;
- vfpacc3x1 += vmagic_bias;
+ const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
+ const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
+ const int32_t vrndacc1x0 = (int32_t) lrintf(vfpacc1x0);
+ const int32_t vrndacc1x1 = (int32_t) lrintf(vfpacc1x1);
+ const int32_t vrndacc2x0 = (int32_t) lrintf(vfpacc2x0);
+ const int32_t vrndacc2x1 = (int32_t) lrintf(vfpacc2x1);
+ const int32_t vrndacc3x0 = (int32_t) lrintf(vfpacc3x0);
+ const int32_t vrndacc3x1 = (int32_t) lrintf(vfpacc3x1);
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
- int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
- int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
- int32_t vout1x0 = (int32_t) fp32_to_bits(vfpacc1x0) - vmagic_bias_less_output_zero_point;
- int32_t vout1x1 = (int32_t) fp32_to_bits(vfpacc1x1) - vmagic_bias_less_output_zero_point;
- int32_t vout2x0 = (int32_t) fp32_to_bits(vfpacc2x0) - vmagic_bias_less_output_zero_point;
- int32_t vout2x1 = (int32_t) fp32_to_bits(vfpacc2x1) - vmagic_bias_less_output_zero_point;
- int32_t vout3x0 = (int32_t) fp32_to_bits(vfpacc3x0) - vmagic_bias_less_output_zero_point;
- int32_t vout3x1 = (int32_t) fp32_to_bits(vfpacc3x1) - vmagic_bias_less_output_zero_point;
+ const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
+ int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
+ int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
+ int32_t vout1x0 = vrndacc1x0 + voutput_zero_point;
+ int32_t vout1x1 = vrndacc1x1 + voutput_zero_point;
+ int32_t vout2x0 = vrndacc2x0 + voutput_zero_point;
+ int32_t vout2x1 = vrndacc2x1 + voutput_zero_point;
+ int32_t vout3x0 = vrndacc3x0 + voutput_zero_point;
+ int32_t vout3x1 = vrndacc3x1 + voutput_zero_point;
if XNN_LIKELY(nc >= 2) {
c0[0] = (uint8_t) vout0x0;
diff --git a/src/qu8-gemm/gen/4x4-minmax-fp32-scalar-magic.c b/src/qu8-gemm/gen/4x4-minmax-fp32-scalar-fmagic.c
similarity index 95%
rename from src/qu8-gemm/gen/4x4-minmax-fp32-scalar-magic.c
rename to src/qu8-gemm/gen/4x4-minmax-fp32-scalar-fmagic.c
index 0856f89..bcc904f 100644
--- a/src/qu8-gemm/gen/4x4-minmax-fp32-scalar-magic.c
+++ b/src/qu8-gemm/gen/4x4-minmax-fp32-scalar-fmagic.c
@@ -15,7 +15,7 @@
#include <xnnpack/gemm.h>
-void xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_magic(
+void xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic(
size_t mr,
size_t nc,
size_t kc,
@@ -53,7 +53,7 @@
c3 = c2;
}
- const int32_t vb_zero_point = params->fp32_scalar_magic.kernel_zero_point;
+ const int32_t vb_zero_point = params->fp32_scalar_fmagic.kernel_zero_point;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
@@ -123,7 +123,7 @@
float vfpacc3x2 = (float) vacc3x2;
float vfpacc3x3 = (float) vacc3x3;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_fmagic.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc0x2 *= vscale;
@@ -141,7 +141,7 @@
vfpacc3x2 *= vscale;
vfpacc3x3 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
@@ -159,7 +159,7 @@
vfpacc3x2 = math_max_f32(vfpacc3x2, voutput_min_less_zero_point);
vfpacc3x3 = math_max_f32(vfpacc3x3, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
@@ -177,7 +177,7 @@
vfpacc3x2 = math_min_f32(vfpacc3x2, voutput_max_less_zero_point);
vfpacc3x3 = math_min_f32(vfpacc3x3, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
+ const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc0x2 += vmagic_bias;
@@ -195,7 +195,7 @@
vfpacc3x2 += vmagic_bias;
vfpacc3x3 += vmagic_bias;
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
+ const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
int32_t vout0x2 = (int32_t) fp32_to_bits(vfpacc0x2) - vmagic_bias_less_output_zero_point;
diff --git a/src/qu8-gemm/gen/4x4-minmax-fp32-scalar-lrint.c b/src/qu8-gemm/gen/4x4-minmax-fp32-scalar-lrint.c
deleted file mode 100644
index 2e9539a..0000000
--- a/src/qu8-gemm/gen/4x4-minmax-fp32-scalar-lrint.c
+++ /dev/null
@@ -1,272 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-gemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-#include <math.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint(
- size_t mr,
- size_t nc,
- size_t kc,
- const uint8_t* restrict a,
- size_t a_stride,
- const void* restrict w,
- uint8_t* restrict c,
- size_t cm_stride,
- size_t cn_stride,
- const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 4);
- assert(nc != 0);
- assert(kc != 0);
-
- const uint8_t* a0 = a;
- uint8_t* c0 = c;
- const uint8_t* a1 = (const uint8_t*) ((uintptr_t) a0 + a_stride);
- uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
- if XNN_UNPREDICTABLE(mr < 2) {
- a1 = a0;
- c1 = c0;
- }
- const uint8_t* a2 = (const uint8_t*) ((uintptr_t) a1 + a_stride);
- uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
- if XNN_UNPREDICTABLE(mr <= 2) {
- a2 = a1;
- c2 = c1;
- }
- const uint8_t* a3 = (const uint8_t*) ((uintptr_t) a2 + a_stride);
- uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
- if XNN_UNPREDICTABLE(mr != 4) {
- a3 = a2;
- c3 = c2;
- }
-
- const int32_t vb_zero_point = params->fp32_scalar_lrint.kernel_zero_point;
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- int32_t vacc0x2 = ((const int32_t*) w)[2];
- int32_t vacc0x3 = ((const int32_t*) w)[3];
- int32_t vacc1x0 = vacc0x0;
- int32_t vacc1x1 = vacc0x1;
- int32_t vacc1x2 = vacc0x2;
- int32_t vacc1x3 = vacc0x3;
- int32_t vacc2x0 = vacc0x0;
- int32_t vacc2x1 = vacc0x1;
- int32_t vacc2x2 = vacc0x2;
- int32_t vacc2x3 = vacc0x3;
- int32_t vacc3x0 = vacc0x0;
- int32_t vacc3x1 = vacc0x1;
- int32_t vacc3x2 = vacc0x2;
- int32_t vacc3x3 = vacc0x3;
- w = (const void*) ((const int32_t*) w + 4);
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) (uint32_t) *a0++;
- const int32_t va1 = (int32_t) (uint32_t) *a1++;
- const int32_t va2 = (int32_t) (uint32_t) *a2++;
- const int32_t va3 = (int32_t) (uint32_t) *a3++;
-
- const int32_t vb0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vb_zero_point;
- const int32_t vb1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vb_zero_point;
- const int32_t vb2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vb_zero_point;
- const int32_t vb3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vb_zero_point;
- w = (const void*) ((const uint8_t*) w + 4);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
- vacc0x2 += va0 * vb2;
- vacc0x3 += va0 * vb3;
- vacc1x0 += va1 * vb0;
- vacc1x1 += va1 * vb1;
- vacc1x2 += va1 * vb2;
- vacc1x3 += va1 * vb3;
- vacc2x0 += va2 * vb0;
- vacc2x1 += va2 * vb1;
- vacc2x2 += va2 * vb2;
- vacc2x3 += va2 * vb3;
- vacc3x0 += va3 * vb0;
- vacc3x1 += va3 * vb1;
- vacc3x2 += va3 * vb2;
- vacc3x3 += va3 * vb3;
-
- k -= sizeof(uint8_t);
- } while (k != 0);
-
- float vfpacc0x0 = (float) vacc0x0;
- float vfpacc0x1 = (float) vacc0x1;
- float vfpacc0x2 = (float) vacc0x2;
- float vfpacc0x3 = (float) vacc0x3;
- float vfpacc1x0 = (float) vacc1x0;
- float vfpacc1x1 = (float) vacc1x1;
- float vfpacc1x2 = (float) vacc1x2;
- float vfpacc1x3 = (float) vacc1x3;
- float vfpacc2x0 = (float) vacc2x0;
- float vfpacc2x1 = (float) vacc2x1;
- float vfpacc2x2 = (float) vacc2x2;
- float vfpacc2x3 = (float) vacc2x3;
- float vfpacc3x0 = (float) vacc3x0;
- float vfpacc3x1 = (float) vacc3x1;
- float vfpacc3x2 = (float) vacc3x2;
- float vfpacc3x3 = (float) vacc3x3;
-
- const float vscale = params->fp32_scalar_lrint.scale;
- vfpacc0x0 *= vscale;
- vfpacc0x1 *= vscale;
- vfpacc0x2 *= vscale;
- vfpacc0x3 *= vscale;
- vfpacc1x0 *= vscale;
- vfpacc1x1 *= vscale;
- vfpacc1x2 *= vscale;
- vfpacc1x3 *= vscale;
- vfpacc2x0 *= vscale;
- vfpacc2x1 *= vscale;
- vfpacc2x2 *= vscale;
- vfpacc2x3 *= vscale;
- vfpacc3x0 *= vscale;
- vfpacc3x1 *= vscale;
- vfpacc3x2 *= vscale;
- vfpacc3x3 *= vscale;
-
- long vrndacc0x0 = lrintf(vfpacc0x0);
- long vrndacc0x1 = lrintf(vfpacc0x1);
- long vrndacc0x2 = lrintf(vfpacc0x2);
- long vrndacc0x3 = lrintf(vfpacc0x3);
- long vrndacc1x0 = lrintf(vfpacc1x0);
- long vrndacc1x1 = lrintf(vfpacc1x1);
- long vrndacc1x2 = lrintf(vfpacc1x2);
- long vrndacc1x3 = lrintf(vfpacc1x3);
- long vrndacc2x0 = lrintf(vfpacc2x0);
- long vrndacc2x1 = lrintf(vfpacc2x1);
- long vrndacc2x2 = lrintf(vfpacc2x2);
- long vrndacc2x3 = lrintf(vfpacc2x3);
- long vrndacc3x0 = lrintf(vfpacc3x0);
- long vrndacc3x1 = lrintf(vfpacc3x1);
- long vrndacc3x2 = lrintf(vfpacc3x2);
- long vrndacc3x3 = lrintf(vfpacc3x3);
-
- const long voutput_min_less_zero_point = params->fp32_scalar_lrint.output_min_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x1;
- vrndacc0x2 = XNN_UNPREDICTABLE(vrndacc0x2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x2;
- vrndacc0x3 = XNN_UNPREDICTABLE(vrndacc0x3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x3;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x1;
- vrndacc1x2 = XNN_UNPREDICTABLE(vrndacc1x2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x2;
- vrndacc1x3 = XNN_UNPREDICTABLE(vrndacc1x3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x3;
- vrndacc2x0 = XNN_UNPREDICTABLE(vrndacc2x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x0;
- vrndacc2x1 = XNN_UNPREDICTABLE(vrndacc2x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x1;
- vrndacc2x2 = XNN_UNPREDICTABLE(vrndacc2x2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x2;
- vrndacc2x3 = XNN_UNPREDICTABLE(vrndacc2x3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x3;
- vrndacc3x0 = XNN_UNPREDICTABLE(vrndacc3x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc3x0;
- vrndacc3x1 = XNN_UNPREDICTABLE(vrndacc3x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc3x1;
- vrndacc3x2 = XNN_UNPREDICTABLE(vrndacc3x2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc3x2;
- vrndacc3x3 = XNN_UNPREDICTABLE(vrndacc3x3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc3x3;
-
- const long voutput_max_less_zero_point = params->fp32_scalar_lrint.output_max_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x1;
- vrndacc0x2 = XNN_UNPREDICTABLE(vrndacc0x2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x2;
- vrndacc0x3 = XNN_UNPREDICTABLE(vrndacc0x3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x3;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x1;
- vrndacc1x2 = XNN_UNPREDICTABLE(vrndacc1x2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x2;
- vrndacc1x3 = XNN_UNPREDICTABLE(vrndacc1x3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x3;
- vrndacc2x0 = XNN_UNPREDICTABLE(vrndacc2x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x0;
- vrndacc2x1 = XNN_UNPREDICTABLE(vrndacc2x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x1;
- vrndacc2x2 = XNN_UNPREDICTABLE(vrndacc2x2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x2;
- vrndacc2x3 = XNN_UNPREDICTABLE(vrndacc2x3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x3;
- vrndacc3x0 = XNN_UNPREDICTABLE(vrndacc3x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc3x0;
- vrndacc3x1 = XNN_UNPREDICTABLE(vrndacc3x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc3x1;
- vrndacc3x2 = XNN_UNPREDICTABLE(vrndacc3x2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc3x2;
- vrndacc3x3 = XNN_UNPREDICTABLE(vrndacc3x3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc3x3;
-
- const int32_t voutput_zero_point = params->fp32_scalar_lrint.output_zero_point;
- int32_t vout0x0 = (int32_t) vrndacc0x0 + voutput_zero_point;
- int32_t vout0x1 = (int32_t) vrndacc0x1 + voutput_zero_point;
- int32_t vout0x2 = (int32_t) vrndacc0x2 + voutput_zero_point;
- int32_t vout0x3 = (int32_t) vrndacc0x3 + voutput_zero_point;
- int32_t vout1x0 = (int32_t) vrndacc1x0 + voutput_zero_point;
- int32_t vout1x1 = (int32_t) vrndacc1x1 + voutput_zero_point;
- int32_t vout1x2 = (int32_t) vrndacc1x2 + voutput_zero_point;
- int32_t vout1x3 = (int32_t) vrndacc1x3 + voutput_zero_point;
- int32_t vout2x0 = (int32_t) vrndacc2x0 + voutput_zero_point;
- int32_t vout2x1 = (int32_t) vrndacc2x1 + voutput_zero_point;
- int32_t vout2x2 = (int32_t) vrndacc2x2 + voutput_zero_point;
- int32_t vout2x3 = (int32_t) vrndacc2x3 + voutput_zero_point;
- int32_t vout3x0 = (int32_t) vrndacc3x0 + voutput_zero_point;
- int32_t vout3x1 = (int32_t) vrndacc3x1 + voutput_zero_point;
- int32_t vout3x2 = (int32_t) vrndacc3x2 + voutput_zero_point;
- int32_t vout3x3 = (int32_t) vrndacc3x3 + voutput_zero_point;
-
- if XNN_LIKELY(nc >= 4) {
- c0[0] = (uint8_t) vout0x0;
- c0[1] = (uint8_t) vout0x1;
- c0[2] = (uint8_t) vout0x2;
- c0[3] = (uint8_t) vout0x3;
- c1[0] = (uint8_t) vout1x0;
- c1[1] = (uint8_t) vout1x1;
- c1[2] = (uint8_t) vout1x2;
- c1[3] = (uint8_t) vout1x3;
- c2[0] = (uint8_t) vout2x0;
- c2[1] = (uint8_t) vout2x1;
- c2[2] = (uint8_t) vout2x2;
- c2[3] = (uint8_t) vout2x3;
- c3[0] = (uint8_t) vout3x0;
- c3[1] = (uint8_t) vout3x1;
- c3[2] = (uint8_t) vout3x2;
- c3[3] = (uint8_t) vout3x3;
-
- a0 = (const uint8_t*) ((uintptr_t) a0 - kc);
- a1 = (const uint8_t*) ((uintptr_t) a1 - kc);
- a2 = (const uint8_t*) ((uintptr_t) a2 - kc);
- a3 = (const uint8_t*) ((uintptr_t) a3 - kc);
-
- c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
- c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
- c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
- c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);
-
- nc -= 4;
- } else {
- if (nc & 2) {
- c0[0] = (uint8_t) vout0x0;
- c0[1] = (uint8_t) vout0x1;
- vout0x0 = vout0x2;
- c0 += 2;
- c1[0] = (uint8_t) vout1x0;
- c1[1] = (uint8_t) vout1x1;
- vout1x0 = vout1x2;
- c1 += 2;
- c2[0] = (uint8_t) vout2x0;
- c2[1] = (uint8_t) vout2x1;
- vout2x0 = vout2x2;
- c2 += 2;
- c3[0] = (uint8_t) vout3x0;
- c3[1] = (uint8_t) vout3x1;
- vout3x0 = vout3x2;
- c3 += 2;
- }
- if (nc & 1) {
- c0[0] = (uint8_t) vout0x0;
- c1[0] = (uint8_t) vout1x0;
- c2[0] = (uint8_t) vout2x0;
- c3[0] = (uint8_t) vout3x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qu8-gemm/gen/4x4-minmax-fp32-scalar-magic.c b/src/qu8-gemm/gen/4x4-minmax-fp32-scalar-lrintf.c
similarity index 76%
copy from src/qu8-gemm/gen/4x4-minmax-fp32-scalar-magic.c
copy to src/qu8-gemm/gen/4x4-minmax-fp32-scalar-lrintf.c
index 0856f89..9996838 100644
--- a/src/qu8-gemm/gen/4x4-minmax-fp32-scalar-magic.c
+++ b/src/qu8-gemm/gen/4x4-minmax-fp32-scalar-lrintf.c
@@ -8,14 +8,13 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-
-#include <fp16.h>
+#include <math.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
-void xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_magic(
+void xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
@@ -53,7 +52,7 @@
c3 = c2;
}
- const int32_t vb_zero_point = params->fp32_scalar_magic.kernel_zero_point;
+ const int32_t vb_zero_point = params->fp32_scalar_lrintf.kernel_zero_point;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
@@ -123,7 +122,7 @@
float vfpacc3x2 = (float) vacc3x2;
float vfpacc3x3 = (float) vacc3x3;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_lrintf.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc0x2 *= vscale;
@@ -141,7 +140,7 @@
vfpacc3x2 *= vscale;
vfpacc3x3 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
@@ -159,7 +158,7 @@
vfpacc3x2 = math_max_f32(vfpacc3x2, voutput_min_less_zero_point);
vfpacc3x3 = math_max_f32(vfpacc3x3, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
@@ -177,41 +176,40 @@
vfpacc3x2 = math_min_f32(vfpacc3x2, voutput_max_less_zero_point);
vfpacc3x3 = math_min_f32(vfpacc3x3, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
- vfpacc0x0 += vmagic_bias;
- vfpacc0x1 += vmagic_bias;
- vfpacc0x2 += vmagic_bias;
- vfpacc0x3 += vmagic_bias;
- vfpacc1x0 += vmagic_bias;
- vfpacc1x1 += vmagic_bias;
- vfpacc1x2 += vmagic_bias;
- vfpacc1x3 += vmagic_bias;
- vfpacc2x0 += vmagic_bias;
- vfpacc2x1 += vmagic_bias;
- vfpacc2x2 += vmagic_bias;
- vfpacc2x3 += vmagic_bias;
- vfpacc3x0 += vmagic_bias;
- vfpacc3x1 += vmagic_bias;
- vfpacc3x2 += vmagic_bias;
- vfpacc3x3 += vmagic_bias;
+ const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
+ const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
+ const int32_t vrndacc0x2 = (int32_t) lrintf(vfpacc0x2);
+ const int32_t vrndacc0x3 = (int32_t) lrintf(vfpacc0x3);
+ const int32_t vrndacc1x0 = (int32_t) lrintf(vfpacc1x0);
+ const int32_t vrndacc1x1 = (int32_t) lrintf(vfpacc1x1);
+ const int32_t vrndacc1x2 = (int32_t) lrintf(vfpacc1x2);
+ const int32_t vrndacc1x3 = (int32_t) lrintf(vfpacc1x3);
+ const int32_t vrndacc2x0 = (int32_t) lrintf(vfpacc2x0);
+ const int32_t vrndacc2x1 = (int32_t) lrintf(vfpacc2x1);
+ const int32_t vrndacc2x2 = (int32_t) lrintf(vfpacc2x2);
+ const int32_t vrndacc2x3 = (int32_t) lrintf(vfpacc2x3);
+ const int32_t vrndacc3x0 = (int32_t) lrintf(vfpacc3x0);
+ const int32_t vrndacc3x1 = (int32_t) lrintf(vfpacc3x1);
+ const int32_t vrndacc3x2 = (int32_t) lrintf(vfpacc3x2);
+ const int32_t vrndacc3x3 = (int32_t) lrintf(vfpacc3x3);
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
- int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
- int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
- int32_t vout0x2 = (int32_t) fp32_to_bits(vfpacc0x2) - vmagic_bias_less_output_zero_point;
- int32_t vout0x3 = (int32_t) fp32_to_bits(vfpacc0x3) - vmagic_bias_less_output_zero_point;
- int32_t vout1x0 = (int32_t) fp32_to_bits(vfpacc1x0) - vmagic_bias_less_output_zero_point;
- int32_t vout1x1 = (int32_t) fp32_to_bits(vfpacc1x1) - vmagic_bias_less_output_zero_point;
- int32_t vout1x2 = (int32_t) fp32_to_bits(vfpacc1x2) - vmagic_bias_less_output_zero_point;
- int32_t vout1x3 = (int32_t) fp32_to_bits(vfpacc1x3) - vmagic_bias_less_output_zero_point;
- int32_t vout2x0 = (int32_t) fp32_to_bits(vfpacc2x0) - vmagic_bias_less_output_zero_point;
- int32_t vout2x1 = (int32_t) fp32_to_bits(vfpacc2x1) - vmagic_bias_less_output_zero_point;
- int32_t vout2x2 = (int32_t) fp32_to_bits(vfpacc2x2) - vmagic_bias_less_output_zero_point;
- int32_t vout2x3 = (int32_t) fp32_to_bits(vfpacc2x3) - vmagic_bias_less_output_zero_point;
- int32_t vout3x0 = (int32_t) fp32_to_bits(vfpacc3x0) - vmagic_bias_less_output_zero_point;
- int32_t vout3x1 = (int32_t) fp32_to_bits(vfpacc3x1) - vmagic_bias_less_output_zero_point;
- int32_t vout3x2 = (int32_t) fp32_to_bits(vfpacc3x2) - vmagic_bias_less_output_zero_point;
- int32_t vout3x3 = (int32_t) fp32_to_bits(vfpacc3x3) - vmagic_bias_less_output_zero_point;
+ const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
+ int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
+ int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
+ int32_t vout0x2 = vrndacc0x2 + voutput_zero_point;
+ int32_t vout0x3 = vrndacc0x3 + voutput_zero_point;
+ int32_t vout1x0 = vrndacc1x0 + voutput_zero_point;
+ int32_t vout1x1 = vrndacc1x1 + voutput_zero_point;
+ int32_t vout1x2 = vrndacc1x2 + voutput_zero_point;
+ int32_t vout1x3 = vrndacc1x3 + voutput_zero_point;
+ int32_t vout2x0 = vrndacc2x0 + voutput_zero_point;
+ int32_t vout2x1 = vrndacc2x1 + voutput_zero_point;
+ int32_t vout2x2 = vrndacc2x2 + voutput_zero_point;
+ int32_t vout2x3 = vrndacc2x3 + voutput_zero_point;
+ int32_t vout3x0 = vrndacc3x0 + voutput_zero_point;
+ int32_t vout3x1 = vrndacc3x1 + voutput_zero_point;
+ int32_t vout3x2 = vrndacc3x2 + voutput_zero_point;
+ int32_t vout3x3 = vrndacc3x3 + voutput_zero_point;
if XNN_LIKELY(nc >= 4) {
c0[0] = (uint8_t) vout0x0;
diff --git a/src/qu8-igemm/gen/1x2-minmax-fp32-scalar-magic.c b/src/qu8-igemm/gen/1x2-minmax-fp32-scalar-fmagic.c
similarity index 88%
rename from src/qu8-igemm/gen/1x2-minmax-fp32-scalar-magic.c
rename to src/qu8-igemm/gen/1x2-minmax-fp32-scalar-fmagic.c
index 30b5378..2a9ae22 100644
--- a/src/qu8-igemm/gen/1x2-minmax-fp32-scalar-magic.c
+++ b/src/qu8-igemm/gen/1x2-minmax-fp32-scalar-fmagic.c
@@ -15,7 +15,7 @@
#include <xnnpack/gemm.h>
-void xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_magic(
+void xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic(
size_t mr,
size_t nc,
size_t kc,
@@ -41,7 +41,7 @@
uint8_t* c0 = c;
- const int32_t vb_zero_point = params->fp32_scalar_magic.kernel_zero_point;
+ const int32_t vb_zero_point = params->fp32_scalar_fmagic.kernel_zero_point;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
@@ -75,23 +75,23 @@
float vfpacc0x0 = (float) vacc0x0;
float vfpacc0x1 = (float) vacc0x1;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_fmagic.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
+ const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
+ const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
diff --git a/src/qu8-igemm/gen/1x2-minmax-fp32-scalar-lrint.c b/src/qu8-igemm/gen/1x2-minmax-fp32-scalar-lrint.c
deleted file mode 100644
index d410583..0000000
--- a/src/qu8-igemm/gen/1x2-minmax-fp32-scalar-lrint.c
+++ /dev/null
@@ -1,112 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-igemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-#include <math.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint(
- size_t mr,
- size_t nc,
- size_t kc,
- size_t ks,
- const uint8_t**restrict a,
- const void*restrict w,
- uint8_t*restrict c,
- size_t cm_stride,
- size_t cn_stride,
- size_t a_offset,
- const uint8_t* zero,
- const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 1);
- assert(nc != 0);
- assert(kc != 0);
- assert(ks != 0);
- assert(ks % (1 * sizeof(void*)) == 0);
- assert(a != NULL);
- assert(w != NULL);
- assert(c != NULL);
-
- uint8_t* c0 = c;
-
- const int32_t vb_zero_point = params->fp32_scalar_lrint.kernel_zero_point;
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- w = (const void*) ((const int32_t*) w + 2);
-
- size_t p = ks;
- do {
- const uint8_t* restrict a0 = a[0];
- assert(a0 != NULL);
- if XNN_UNPREDICTABLE(a0 != zero) {
- a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
- }
- a += 1;
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) (uint32_t) *a0++;
-
- const int32_t vb0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vb_zero_point;
- const int32_t vb1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vb_zero_point;
- w = (const void*) ((const uint8_t*) w + 2);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
-
- k -= sizeof(uint8_t);
- } while (k != 0);
- p -= 1 * sizeof(void*);
- } while (p != 0);
-
- float vfpacc0x0 = (float) vacc0x0;
- float vfpacc0x1 = (float) vacc0x1;
-
- const float vscale = params->fp32_scalar_lrint.scale;
- vfpacc0x0 *= vscale;
- vfpacc0x1 *= vscale;
-
- long vrndacc0x0 = lrintf(vfpacc0x0);
- long vrndacc0x1 = lrintf(vfpacc0x1);
-
- const long voutput_min_less_zero_point = params->fp32_scalar_lrint.output_min_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x1;
-
- const long voutput_max_less_zero_point = params->fp32_scalar_lrint.output_max_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x1;
-
- const int32_t voutput_zero_point = params->fp32_scalar_lrint.output_zero_point;
- int32_t vout0x0 = (int32_t) vrndacc0x0 + voutput_zero_point;
- int32_t vout0x1 = (int32_t) vrndacc0x1 + voutput_zero_point;
-
- if XNN_LIKELY(nc >= 2) {
- c0[0] = (uint8_t) vout0x0;
- c0[1] = (uint8_t) vout0x1;
-
- c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
-
- a = (const uint8_t**restrict) ((uintptr_t) a - ks);
- nc -= 2;
- } else {
- if (nc & 1) {
- c0[0] = (uint8_t) vout0x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qu8-igemm/gen/1x2-minmax-fp32-scalar-magic.c b/src/qu8-igemm/gen/1x2-minmax-fp32-scalar-lrintf.c
similarity index 78%
copy from src/qu8-igemm/gen/1x2-minmax-fp32-scalar-magic.c
copy to src/qu8-igemm/gen/1x2-minmax-fp32-scalar-lrintf.c
index 30b5378..322e443 100644
--- a/src/qu8-igemm/gen/1x2-minmax-fp32-scalar-magic.c
+++ b/src/qu8-igemm/gen/1x2-minmax-fp32-scalar-lrintf.c
@@ -8,14 +8,13 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-
-#include <fp16.h>
+#include <math.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
-void xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_magic(
+void xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
@@ -41,7 +40,7 @@
uint8_t* c0 = c;
- const int32_t vb_zero_point = params->fp32_scalar_magic.kernel_zero_point;
+ const int32_t vb_zero_point = params->fp32_scalar_lrintf.kernel_zero_point;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
@@ -75,25 +74,24 @@
float vfpacc0x0 = (float) vacc0x0;
float vfpacc0x1 = (float) vacc0x1;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_lrintf.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
- vfpacc0x0 += vmagic_bias;
- vfpacc0x1 += vmagic_bias;
+ const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
+ const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
- int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
- int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
+ const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
+ int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
+ int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
if XNN_LIKELY(nc >= 2) {
c0[0] = (uint8_t) vout0x0;
diff --git a/src/qu8-igemm/gen/1x4-minmax-fp32-scalar-magic.c b/src/qu8-igemm/gen/1x4-minmax-fp32-scalar-fmagic.c
similarity index 91%
rename from src/qu8-igemm/gen/1x4-minmax-fp32-scalar-magic.c
rename to src/qu8-igemm/gen/1x4-minmax-fp32-scalar-fmagic.c
index 153865d..ce02fd1 100644
--- a/src/qu8-igemm/gen/1x4-minmax-fp32-scalar-magic.c
+++ b/src/qu8-igemm/gen/1x4-minmax-fp32-scalar-fmagic.c
@@ -15,7 +15,7 @@
#include <xnnpack/gemm.h>
-void xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_magic(
+void xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic(
size_t mr,
size_t nc,
size_t kc,
@@ -41,7 +41,7 @@
uint8_t* c0 = c;
- const int32_t vb_zero_point = params->fp32_scalar_magic.kernel_zero_point;
+ const int32_t vb_zero_point = params->fp32_scalar_fmagic.kernel_zero_point;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
@@ -83,31 +83,31 @@
float vfpacc0x2 = (float) vacc0x2;
float vfpacc0x3 = (float) vacc0x3;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_fmagic.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc0x2 *= vscale;
vfpacc0x3 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
vfpacc0x3 = math_max_f32(vfpacc0x3, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
vfpacc0x3 = math_min_f32(vfpacc0x3, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
+ const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc0x2 += vmagic_bias;
vfpacc0x3 += vmagic_bias;
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
+ const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
int32_t vout0x2 = (int32_t) fp32_to_bits(vfpacc0x2) - vmagic_bias_less_output_zero_point;
diff --git a/src/qu8-igemm/gen/1x4-minmax-fp32-scalar-lrint.c b/src/qu8-igemm/gen/1x4-minmax-fp32-scalar-lrint.c
deleted file mode 100644
index b24d83e..0000000
--- a/src/qu8-igemm/gen/1x4-minmax-fp32-scalar-lrint.c
+++ /dev/null
@@ -1,138 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-igemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-#include <math.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint(
- size_t mr,
- size_t nc,
- size_t kc,
- size_t ks,
- const uint8_t**restrict a,
- const void*restrict w,
- uint8_t*restrict c,
- size_t cm_stride,
- size_t cn_stride,
- size_t a_offset,
- const uint8_t* zero,
- const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 1);
- assert(nc != 0);
- assert(kc != 0);
- assert(ks != 0);
- assert(ks % (1 * sizeof(void*)) == 0);
- assert(a != NULL);
- assert(w != NULL);
- assert(c != NULL);
-
- uint8_t* c0 = c;
-
- const int32_t vb_zero_point = params->fp32_scalar_lrint.kernel_zero_point;
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- int32_t vacc0x2 = ((const int32_t*) w)[2];
- int32_t vacc0x3 = ((const int32_t*) w)[3];
- w = (const void*) ((const int32_t*) w + 4);
-
- size_t p = ks;
- do {
- const uint8_t* restrict a0 = a[0];
- assert(a0 != NULL);
- if XNN_UNPREDICTABLE(a0 != zero) {
- a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
- }
- a += 1;
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) (uint32_t) *a0++;
-
- const int32_t vb0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vb_zero_point;
- const int32_t vb1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vb_zero_point;
- const int32_t vb2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vb_zero_point;
- const int32_t vb3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vb_zero_point;
- w = (const void*) ((const uint8_t*) w + 4);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
- vacc0x2 += va0 * vb2;
- vacc0x3 += va0 * vb3;
-
- k -= sizeof(uint8_t);
- } while (k != 0);
- p -= 1 * sizeof(void*);
- } while (p != 0);
-
- float vfpacc0x0 = (float) vacc0x0;
- float vfpacc0x1 = (float) vacc0x1;
- float vfpacc0x2 = (float) vacc0x2;
- float vfpacc0x3 = (float) vacc0x3;
-
- const float vscale = params->fp32_scalar_lrint.scale;
- vfpacc0x0 *= vscale;
- vfpacc0x1 *= vscale;
- vfpacc0x2 *= vscale;
- vfpacc0x3 *= vscale;
-
- long vrndacc0x0 = lrintf(vfpacc0x0);
- long vrndacc0x1 = lrintf(vfpacc0x1);
- long vrndacc0x2 = lrintf(vfpacc0x2);
- long vrndacc0x3 = lrintf(vfpacc0x3);
-
- const long voutput_min_less_zero_point = params->fp32_scalar_lrint.output_min_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x1;
- vrndacc0x2 = XNN_UNPREDICTABLE(vrndacc0x2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x2;
- vrndacc0x3 = XNN_UNPREDICTABLE(vrndacc0x3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x3;
-
- const long voutput_max_less_zero_point = params->fp32_scalar_lrint.output_max_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x1;
- vrndacc0x2 = XNN_UNPREDICTABLE(vrndacc0x2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x2;
- vrndacc0x3 = XNN_UNPREDICTABLE(vrndacc0x3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x3;
-
- const int32_t voutput_zero_point = params->fp32_scalar_lrint.output_zero_point;
- int32_t vout0x0 = (int32_t) vrndacc0x0 + voutput_zero_point;
- int32_t vout0x1 = (int32_t) vrndacc0x1 + voutput_zero_point;
- int32_t vout0x2 = (int32_t) vrndacc0x2 + voutput_zero_point;
- int32_t vout0x3 = (int32_t) vrndacc0x3 + voutput_zero_point;
-
- if XNN_LIKELY(nc >= 4) {
- c0[0] = (uint8_t) vout0x0;
- c0[1] = (uint8_t) vout0x1;
- c0[2] = (uint8_t) vout0x2;
- c0[3] = (uint8_t) vout0x3;
-
- c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
-
- a = (const uint8_t**restrict) ((uintptr_t) a - ks);
- nc -= 4;
- } else {
- if (nc & 2) {
- c0[0] = (uint8_t) vout0x0;
- c0[1] = (uint8_t) vout0x1;
- vout0x0 = vout0x2;
- c0 += 2;
- }
- if (nc & 1) {
- c0[0] = (uint8_t) vout0x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qu8-igemm/gen/1x4-minmax-fp32-scalar-magic.c b/src/qu8-igemm/gen/1x4-minmax-fp32-scalar-lrintf.c
similarity index 78%
copy from src/qu8-igemm/gen/1x4-minmax-fp32-scalar-magic.c
copy to src/qu8-igemm/gen/1x4-minmax-fp32-scalar-lrintf.c
index 153865d..ae07ec2 100644
--- a/src/qu8-igemm/gen/1x4-minmax-fp32-scalar-magic.c
+++ b/src/qu8-igemm/gen/1x4-minmax-fp32-scalar-lrintf.c
@@ -8,14 +8,13 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-
-#include <fp16.h>
+#include <math.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
-void xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_magic(
+void xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
@@ -41,7 +40,7 @@
uint8_t* c0 = c;
- const int32_t vb_zero_point = params->fp32_scalar_magic.kernel_zero_point;
+ const int32_t vb_zero_point = params->fp32_scalar_lrintf.kernel_zero_point;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
@@ -83,35 +82,34 @@
float vfpacc0x2 = (float) vacc0x2;
float vfpacc0x3 = (float) vacc0x3;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_lrintf.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc0x2 *= vscale;
vfpacc0x3 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
vfpacc0x3 = math_max_f32(vfpacc0x3, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
vfpacc0x3 = math_min_f32(vfpacc0x3, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
- vfpacc0x0 += vmagic_bias;
- vfpacc0x1 += vmagic_bias;
- vfpacc0x2 += vmagic_bias;
- vfpacc0x3 += vmagic_bias;
+ const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
+ const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
+ const int32_t vrndacc0x2 = (int32_t) lrintf(vfpacc0x2);
+ const int32_t vrndacc0x3 = (int32_t) lrintf(vfpacc0x3);
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
- int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
- int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
- int32_t vout0x2 = (int32_t) fp32_to_bits(vfpacc0x2) - vmagic_bias_less_output_zero_point;
- int32_t vout0x3 = (int32_t) fp32_to_bits(vfpacc0x3) - vmagic_bias_less_output_zero_point;
+ const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
+ int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
+ int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
+ int32_t vout0x2 = vrndacc0x2 + voutput_zero_point;
+ int32_t vout0x3 = vrndacc0x3 + voutput_zero_point;
if XNN_LIKELY(nc >= 4) {
c0[0] = (uint8_t) vout0x0;
diff --git a/src/qu8-igemm/gen/2x2-minmax-fp32-scalar-magic.c b/src/qu8-igemm/gen/2x2-minmax-fp32-scalar-fmagic.c
similarity index 91%
rename from src/qu8-igemm/gen/2x2-minmax-fp32-scalar-magic.c
rename to src/qu8-igemm/gen/2x2-minmax-fp32-scalar-fmagic.c
index 3b6f68c..b12543f 100644
--- a/src/qu8-igemm/gen/2x2-minmax-fp32-scalar-magic.c
+++ b/src/qu8-igemm/gen/2x2-minmax-fp32-scalar-fmagic.c
@@ -15,7 +15,7 @@
#include <xnnpack/gemm.h>
-void xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_magic(
+void xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic(
size_t mr,
size_t nc,
size_t kc,
@@ -45,7 +45,7 @@
c1 = c0;
}
- const int32_t vb_zero_point = params->fp32_scalar_magic.kernel_zero_point;
+ const int32_t vb_zero_point = params->fp32_scalar_fmagic.kernel_zero_point;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
@@ -91,31 +91,31 @@
float vfpacc1x0 = (float) vacc1x0;
float vfpacc1x1 = (float) vacc1x1;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_fmagic.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc1x0 *= vscale;
vfpacc1x1 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point);
vfpacc1x1 = math_max_f32(vfpacc1x1, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point);
vfpacc1x1 = math_min_f32(vfpacc1x1, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
+ const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc1x0 += vmagic_bias;
vfpacc1x1 += vmagic_bias;
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
+ const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
int32_t vout1x0 = (int32_t) fp32_to_bits(vfpacc1x0) - vmagic_bias_less_output_zero_point;
diff --git a/src/qu8-igemm/gen/2x2-minmax-fp32-scalar-lrint.c b/src/qu8-igemm/gen/2x2-minmax-fp32-scalar-lrint.c
deleted file mode 100644
index ba1ee7c..0000000
--- a/src/qu8-igemm/gen/2x2-minmax-fp32-scalar-lrint.c
+++ /dev/null
@@ -1,142 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-igemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-#include <math.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint(
- size_t mr,
- size_t nc,
- size_t kc,
- size_t ks,
- const uint8_t**restrict a,
- const void*restrict w,
- uint8_t*restrict c,
- size_t cm_stride,
- size_t cn_stride,
- size_t a_offset,
- const uint8_t* zero,
- const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 2);
- assert(nc != 0);
- assert(kc != 0);
- assert(ks != 0);
- assert(ks % (2 * sizeof(void*)) == 0);
- assert(a != NULL);
- assert(w != NULL);
- assert(c != NULL);
-
- uint8_t* c0 = c;
- uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
- if XNN_UNPREDICTABLE(mr != 2) {
- c1 = c0;
- }
-
- const int32_t vb_zero_point = params->fp32_scalar_lrint.kernel_zero_point;
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- int32_t vacc1x0 = vacc0x0;
- int32_t vacc1x1 = vacc0x1;
- w = (const void*) ((const int32_t*) w + 2);
-
- size_t p = ks;
- do {
- const uint8_t* restrict a0 = a[0];
- assert(a0 != NULL);
- if XNN_UNPREDICTABLE(a0 != zero) {
- a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
- }
- const uint8_t* restrict a1 = a[1];
- assert(a1 != NULL);
- if XNN_UNPREDICTABLE(a1 != zero) {
- a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
- }
- a += 2;
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) (uint32_t) *a0++;
- const int32_t va1 = (int32_t) (uint32_t) *a1++;
-
- const int32_t vb0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vb_zero_point;
- const int32_t vb1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vb_zero_point;
- w = (const void*) ((const uint8_t*) w + 2);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
- vacc1x0 += va1 * vb0;
- vacc1x1 += va1 * vb1;
-
- k -= sizeof(uint8_t);
- } while (k != 0);
- p -= 2 * sizeof(void*);
- } while (p != 0);
-
- float vfpacc0x0 = (float) vacc0x0;
- float vfpacc0x1 = (float) vacc0x1;
- float vfpacc1x0 = (float) vacc1x0;
- float vfpacc1x1 = (float) vacc1x1;
-
- const float vscale = params->fp32_scalar_lrint.scale;
- vfpacc0x0 *= vscale;
- vfpacc0x1 *= vscale;
- vfpacc1x0 *= vscale;
- vfpacc1x1 *= vscale;
-
- long vrndacc0x0 = lrintf(vfpacc0x0);
- long vrndacc0x1 = lrintf(vfpacc0x1);
- long vrndacc1x0 = lrintf(vfpacc1x0);
- long vrndacc1x1 = lrintf(vfpacc1x1);
-
- const long voutput_min_less_zero_point = params->fp32_scalar_lrint.output_min_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x1;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x1;
-
- const long voutput_max_less_zero_point = params->fp32_scalar_lrint.output_max_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x1;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x1;
-
- const int32_t voutput_zero_point = params->fp32_scalar_lrint.output_zero_point;
- int32_t vout0x0 = (int32_t) vrndacc0x0 + voutput_zero_point;
- int32_t vout0x1 = (int32_t) vrndacc0x1 + voutput_zero_point;
- int32_t vout1x0 = (int32_t) vrndacc1x0 + voutput_zero_point;
- int32_t vout1x1 = (int32_t) vrndacc1x1 + voutput_zero_point;
-
- if XNN_LIKELY(nc >= 2) {
- c1[0] = (uint8_t) vout1x0;
- c1[1] = (uint8_t) vout1x1;
- c0[0] = (uint8_t) vout0x0;
- c0[1] = (uint8_t) vout0x1;
-
- c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
- c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
-
- a = (const uint8_t**restrict) ((uintptr_t) a - ks);
- nc -= 2;
- } else {
- if (nc & 1) {
- c1[0] = (uint8_t) vout1x0;
- c0[0] = (uint8_t) vout0x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qu8-igemm/gen/2x2-minmax-fp32-scalar-magic.c b/src/qu8-igemm/gen/2x2-minmax-fp32-scalar-lrintf.c
similarity index 78%
copy from src/qu8-igemm/gen/2x2-minmax-fp32-scalar-magic.c
copy to src/qu8-igemm/gen/2x2-minmax-fp32-scalar-lrintf.c
index 3b6f68c..99e23a2 100644
--- a/src/qu8-igemm/gen/2x2-minmax-fp32-scalar-magic.c
+++ b/src/qu8-igemm/gen/2x2-minmax-fp32-scalar-lrintf.c
@@ -8,14 +8,13 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-
-#include <fp16.h>
+#include <math.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
-void xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_magic(
+void xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
@@ -45,7 +44,7 @@
c1 = c0;
}
- const int32_t vb_zero_point = params->fp32_scalar_magic.kernel_zero_point;
+ const int32_t vb_zero_point = params->fp32_scalar_lrintf.kernel_zero_point;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
@@ -91,35 +90,34 @@
float vfpacc1x0 = (float) vacc1x0;
float vfpacc1x1 = (float) vacc1x1;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_lrintf.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc1x0 *= vscale;
vfpacc1x1 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point);
vfpacc1x1 = math_max_f32(vfpacc1x1, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point);
vfpacc1x1 = math_min_f32(vfpacc1x1, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
- vfpacc0x0 += vmagic_bias;
- vfpacc0x1 += vmagic_bias;
- vfpacc1x0 += vmagic_bias;
- vfpacc1x1 += vmagic_bias;
+ const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
+ const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
+ const int32_t vrndacc1x0 = (int32_t) lrintf(vfpacc1x0);
+ const int32_t vrndacc1x1 = (int32_t) lrintf(vfpacc1x1);
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
- int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
- int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
- int32_t vout1x0 = (int32_t) fp32_to_bits(vfpacc1x0) - vmagic_bias_less_output_zero_point;
- int32_t vout1x1 = (int32_t) fp32_to_bits(vfpacc1x1) - vmagic_bias_less_output_zero_point;
+ const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
+ int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
+ int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
+ int32_t vout1x0 = vrndacc1x0 + voutput_zero_point;
+ int32_t vout1x1 = vrndacc1x1 + voutput_zero_point;
if XNN_LIKELY(nc >= 2) {
c1[0] = (uint8_t) vout1x0;
diff --git a/src/qu8-igemm/gen/2x4-minmax-fp32-scalar-magic.c b/src/qu8-igemm/gen/2x4-minmax-fp32-scalar-fmagic.c
similarity index 93%
rename from src/qu8-igemm/gen/2x4-minmax-fp32-scalar-magic.c
rename to src/qu8-igemm/gen/2x4-minmax-fp32-scalar-fmagic.c
index 8b1416f..54d6870 100644
--- a/src/qu8-igemm/gen/2x4-minmax-fp32-scalar-magic.c
+++ b/src/qu8-igemm/gen/2x4-minmax-fp32-scalar-fmagic.c
@@ -15,7 +15,7 @@
#include <xnnpack/gemm.h>
-void xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_magic(
+void xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic(
size_t mr,
size_t nc,
size_t kc,
@@ -45,7 +45,7 @@
c1 = c0;
}
- const int32_t vb_zero_point = params->fp32_scalar_magic.kernel_zero_point;
+ const int32_t vb_zero_point = params->fp32_scalar_fmagic.kernel_zero_point;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
@@ -105,7 +105,7 @@
float vfpacc1x2 = (float) vacc1x2;
float vfpacc1x3 = (float) vacc1x3;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_fmagic.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc0x2 *= vscale;
@@ -115,7 +115,7 @@
vfpacc1x2 *= vscale;
vfpacc1x3 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
@@ -125,7 +125,7 @@
vfpacc1x2 = math_max_f32(vfpacc1x2, voutput_min_less_zero_point);
vfpacc1x3 = math_max_f32(vfpacc1x3, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
@@ -135,7 +135,7 @@
vfpacc1x2 = math_min_f32(vfpacc1x2, voutput_max_less_zero_point);
vfpacc1x3 = math_min_f32(vfpacc1x3, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
+ const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc0x2 += vmagic_bias;
@@ -145,7 +145,7 @@
vfpacc1x2 += vmagic_bias;
vfpacc1x3 += vmagic_bias;
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
+ const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
int32_t vout0x2 = (int32_t) fp32_to_bits(vfpacc0x2) - vmagic_bias_less_output_zero_point;
diff --git a/src/qu8-igemm/gen/2x4-minmax-fp32-scalar-lrint.c b/src/qu8-igemm/gen/2x4-minmax-fp32-scalar-lrint.c
deleted file mode 100644
index 88f70c2..0000000
--- a/src/qu8-igemm/gen/2x4-minmax-fp32-scalar-lrint.c
+++ /dev/null
@@ -1,190 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-igemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-#include <math.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint(
- size_t mr,
- size_t nc,
- size_t kc,
- size_t ks,
- const uint8_t**restrict a,
- const void*restrict w,
- uint8_t*restrict c,
- size_t cm_stride,
- size_t cn_stride,
- size_t a_offset,
- const uint8_t* zero,
- const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 2);
- assert(nc != 0);
- assert(kc != 0);
- assert(ks != 0);
- assert(ks % (2 * sizeof(void*)) == 0);
- assert(a != NULL);
- assert(w != NULL);
- assert(c != NULL);
-
- uint8_t* c0 = c;
- uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
- if XNN_UNPREDICTABLE(mr != 2) {
- c1 = c0;
- }
-
- const int32_t vb_zero_point = params->fp32_scalar_lrint.kernel_zero_point;
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- int32_t vacc0x2 = ((const int32_t*) w)[2];
- int32_t vacc0x3 = ((const int32_t*) w)[3];
- int32_t vacc1x0 = vacc0x0;
- int32_t vacc1x1 = vacc0x1;
- int32_t vacc1x2 = vacc0x2;
- int32_t vacc1x3 = vacc0x3;
- w = (const void*) ((const int32_t*) w + 4);
-
- size_t p = ks;
- do {
- const uint8_t* restrict a0 = a[0];
- assert(a0 != NULL);
- if XNN_UNPREDICTABLE(a0 != zero) {
- a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
- }
- const uint8_t* restrict a1 = a[1];
- assert(a1 != NULL);
- if XNN_UNPREDICTABLE(a1 != zero) {
- a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
- }
- a += 2;
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) (uint32_t) *a0++;
- const int32_t va1 = (int32_t) (uint32_t) *a1++;
-
- const int32_t vb0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vb_zero_point;
- const int32_t vb1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vb_zero_point;
- const int32_t vb2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vb_zero_point;
- const int32_t vb3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vb_zero_point;
- w = (const void*) ((const uint8_t*) w + 4);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
- vacc0x2 += va0 * vb2;
- vacc0x3 += va0 * vb3;
- vacc1x0 += va1 * vb0;
- vacc1x1 += va1 * vb1;
- vacc1x2 += va1 * vb2;
- vacc1x3 += va1 * vb3;
-
- k -= sizeof(uint8_t);
- } while (k != 0);
- p -= 2 * sizeof(void*);
- } while (p != 0);
-
- float vfpacc0x0 = (float) vacc0x0;
- float vfpacc0x1 = (float) vacc0x1;
- float vfpacc0x2 = (float) vacc0x2;
- float vfpacc0x3 = (float) vacc0x3;
- float vfpacc1x0 = (float) vacc1x0;
- float vfpacc1x1 = (float) vacc1x1;
- float vfpacc1x2 = (float) vacc1x2;
- float vfpacc1x3 = (float) vacc1x3;
-
- const float vscale = params->fp32_scalar_lrint.scale;
- vfpacc0x0 *= vscale;
- vfpacc0x1 *= vscale;
- vfpacc0x2 *= vscale;
- vfpacc0x3 *= vscale;
- vfpacc1x0 *= vscale;
- vfpacc1x1 *= vscale;
- vfpacc1x2 *= vscale;
- vfpacc1x3 *= vscale;
-
- long vrndacc0x0 = lrintf(vfpacc0x0);
- long vrndacc0x1 = lrintf(vfpacc0x1);
- long vrndacc0x2 = lrintf(vfpacc0x2);
- long vrndacc0x3 = lrintf(vfpacc0x3);
- long vrndacc1x0 = lrintf(vfpacc1x0);
- long vrndacc1x1 = lrintf(vfpacc1x1);
- long vrndacc1x2 = lrintf(vfpacc1x2);
- long vrndacc1x3 = lrintf(vfpacc1x3);
-
- const long voutput_min_less_zero_point = params->fp32_scalar_lrint.output_min_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x1;
- vrndacc0x2 = XNN_UNPREDICTABLE(vrndacc0x2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x2;
- vrndacc0x3 = XNN_UNPREDICTABLE(vrndacc0x3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x3;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x1;
- vrndacc1x2 = XNN_UNPREDICTABLE(vrndacc1x2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x2;
- vrndacc1x3 = XNN_UNPREDICTABLE(vrndacc1x3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x3;
-
- const long voutput_max_less_zero_point = params->fp32_scalar_lrint.output_max_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x1;
- vrndacc0x2 = XNN_UNPREDICTABLE(vrndacc0x2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x2;
- vrndacc0x3 = XNN_UNPREDICTABLE(vrndacc0x3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x3;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x1;
- vrndacc1x2 = XNN_UNPREDICTABLE(vrndacc1x2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x2;
- vrndacc1x3 = XNN_UNPREDICTABLE(vrndacc1x3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x3;
-
- const int32_t voutput_zero_point = params->fp32_scalar_lrint.output_zero_point;
- int32_t vout0x0 = (int32_t) vrndacc0x0 + voutput_zero_point;
- int32_t vout0x1 = (int32_t) vrndacc0x1 + voutput_zero_point;
- int32_t vout0x2 = (int32_t) vrndacc0x2 + voutput_zero_point;
- int32_t vout0x3 = (int32_t) vrndacc0x3 + voutput_zero_point;
- int32_t vout1x0 = (int32_t) vrndacc1x0 + voutput_zero_point;
- int32_t vout1x1 = (int32_t) vrndacc1x1 + voutput_zero_point;
- int32_t vout1x2 = (int32_t) vrndacc1x2 + voutput_zero_point;
- int32_t vout1x3 = (int32_t) vrndacc1x3 + voutput_zero_point;
-
- if XNN_LIKELY(nc >= 4) {
- c1[0] = (uint8_t) vout1x0;
- c1[1] = (uint8_t) vout1x1;
- c1[2] = (uint8_t) vout1x2;
- c1[3] = (uint8_t) vout1x3;
- c0[0] = (uint8_t) vout0x0;
- c0[1] = (uint8_t) vout0x1;
- c0[2] = (uint8_t) vout0x2;
- c0[3] = (uint8_t) vout0x3;
-
- c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
- c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
-
- a = (const uint8_t**restrict) ((uintptr_t) a - ks);
- nc -= 4;
- } else {
- if (nc & 2) {
- c1[0] = (uint8_t) vout1x0;
- c1[1] = (uint8_t) vout1x1;
- vout1x0 = vout1x2;
- c1 += 2;
- c0[0] = (uint8_t) vout0x0;
- c0[1] = (uint8_t) vout0x1;
- vout0x0 = vout0x2;
- c0 += 2;
- }
- if (nc & 1) {
- c1[0] = (uint8_t) vout1x0;
- c0[0] = (uint8_t) vout0x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qu8-igemm/gen/2x4-minmax-fp32-scalar-magic.c b/src/qu8-igemm/gen/2x4-minmax-fp32-scalar-lrintf.c
similarity index 78%
copy from src/qu8-igemm/gen/2x4-minmax-fp32-scalar-magic.c
copy to src/qu8-igemm/gen/2x4-minmax-fp32-scalar-lrintf.c
index 8b1416f..0887d37 100644
--- a/src/qu8-igemm/gen/2x4-minmax-fp32-scalar-magic.c
+++ b/src/qu8-igemm/gen/2x4-minmax-fp32-scalar-lrintf.c
@@ -8,14 +8,13 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-
-#include <fp16.h>
+#include <math.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
-void xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_magic(
+void xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
@@ -45,7 +44,7 @@
c1 = c0;
}
- const int32_t vb_zero_point = params->fp32_scalar_magic.kernel_zero_point;
+ const int32_t vb_zero_point = params->fp32_scalar_lrintf.kernel_zero_point;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
@@ -105,7 +104,7 @@
float vfpacc1x2 = (float) vacc1x2;
float vfpacc1x3 = (float) vacc1x3;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_lrintf.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc0x2 *= vscale;
@@ -115,7 +114,7 @@
vfpacc1x2 *= vscale;
vfpacc1x3 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
@@ -125,7 +124,7 @@
vfpacc1x2 = math_max_f32(vfpacc1x2, voutput_min_less_zero_point);
vfpacc1x3 = math_max_f32(vfpacc1x3, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
@@ -135,25 +134,24 @@
vfpacc1x2 = math_min_f32(vfpacc1x2, voutput_max_less_zero_point);
vfpacc1x3 = math_min_f32(vfpacc1x3, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
- vfpacc0x0 += vmagic_bias;
- vfpacc0x1 += vmagic_bias;
- vfpacc0x2 += vmagic_bias;
- vfpacc0x3 += vmagic_bias;
- vfpacc1x0 += vmagic_bias;
- vfpacc1x1 += vmagic_bias;
- vfpacc1x2 += vmagic_bias;
- vfpacc1x3 += vmagic_bias;
+ const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
+ const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
+ const int32_t vrndacc0x2 = (int32_t) lrintf(vfpacc0x2);
+ const int32_t vrndacc0x3 = (int32_t) lrintf(vfpacc0x3);
+ const int32_t vrndacc1x0 = (int32_t) lrintf(vfpacc1x0);
+ const int32_t vrndacc1x1 = (int32_t) lrintf(vfpacc1x1);
+ const int32_t vrndacc1x2 = (int32_t) lrintf(vfpacc1x2);
+ const int32_t vrndacc1x3 = (int32_t) lrintf(vfpacc1x3);
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
- int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
- int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
- int32_t vout0x2 = (int32_t) fp32_to_bits(vfpacc0x2) - vmagic_bias_less_output_zero_point;
- int32_t vout0x3 = (int32_t) fp32_to_bits(vfpacc0x3) - vmagic_bias_less_output_zero_point;
- int32_t vout1x0 = (int32_t) fp32_to_bits(vfpacc1x0) - vmagic_bias_less_output_zero_point;
- int32_t vout1x1 = (int32_t) fp32_to_bits(vfpacc1x1) - vmagic_bias_less_output_zero_point;
- int32_t vout1x2 = (int32_t) fp32_to_bits(vfpacc1x2) - vmagic_bias_less_output_zero_point;
- int32_t vout1x3 = (int32_t) fp32_to_bits(vfpacc1x3) - vmagic_bias_less_output_zero_point;
+ const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
+ int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
+ int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
+ int32_t vout0x2 = vrndacc0x2 + voutput_zero_point;
+ int32_t vout0x3 = vrndacc0x3 + voutput_zero_point;
+ int32_t vout1x0 = vrndacc1x0 + voutput_zero_point;
+ int32_t vout1x1 = vrndacc1x1 + voutput_zero_point;
+ int32_t vout1x2 = vrndacc1x2 + voutput_zero_point;
+ int32_t vout1x3 = vrndacc1x3 + voutput_zero_point;
if XNN_LIKELY(nc >= 4) {
c1[0] = (uint8_t) vout1x0;
diff --git a/src/qu8-igemm/gen/3x2-minmax-fp32-scalar-magic.c b/src/qu8-igemm/gen/3x2-minmax-fp32-scalar-fmagic.c
similarity index 93%
rename from src/qu8-igemm/gen/3x2-minmax-fp32-scalar-magic.c
rename to src/qu8-igemm/gen/3x2-minmax-fp32-scalar-fmagic.c
index 66e72a7..bfbb57d 100644
--- a/src/qu8-igemm/gen/3x2-minmax-fp32-scalar-magic.c
+++ b/src/qu8-igemm/gen/3x2-minmax-fp32-scalar-fmagic.c
@@ -15,7 +15,7 @@
#include <xnnpack/gemm.h>
-void xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_magic(
+void xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic(
size_t mr,
size_t nc,
size_t kc,
@@ -49,7 +49,7 @@
c2 = c1;
}
- const int32_t vb_zero_point = params->fp32_scalar_magic.kernel_zero_point;
+ const int32_t vb_zero_point = params->fp32_scalar_fmagic.kernel_zero_point;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
@@ -107,7 +107,7 @@
float vfpacc2x0 = (float) vacc2x0;
float vfpacc2x1 = (float) vacc2x1;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_fmagic.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc1x0 *= vscale;
@@ -115,7 +115,7 @@
vfpacc2x0 *= vscale;
vfpacc2x1 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point);
@@ -123,7 +123,7 @@
vfpacc2x0 = math_max_f32(vfpacc2x0, voutput_min_less_zero_point);
vfpacc2x1 = math_max_f32(vfpacc2x1, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point);
@@ -131,7 +131,7 @@
vfpacc2x0 = math_min_f32(vfpacc2x0, voutput_max_less_zero_point);
vfpacc2x1 = math_min_f32(vfpacc2x1, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
+ const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc1x0 += vmagic_bias;
@@ -139,7 +139,7 @@
vfpacc2x0 += vmagic_bias;
vfpacc2x1 += vmagic_bias;
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
+ const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
int32_t vout1x0 = (int32_t) fp32_to_bits(vfpacc1x0) - vmagic_bias_less_output_zero_point;
diff --git a/src/qu8-igemm/gen/3x2-minmax-fp32-scalar-lrint.c b/src/qu8-igemm/gen/3x2-minmax-fp32-scalar-lrint.c
deleted file mode 100644
index cbab7bc..0000000
--- a/src/qu8-igemm/gen/3x2-minmax-fp32-scalar-lrint.c
+++ /dev/null
@@ -1,172 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-igemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-#include <math.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint(
- size_t mr,
- size_t nc,
- size_t kc,
- size_t ks,
- const uint8_t**restrict a,
- const void*restrict w,
- uint8_t*restrict c,
- size_t cm_stride,
- size_t cn_stride,
- size_t a_offset,
- const uint8_t* zero,
- const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 3);
- assert(nc != 0);
- assert(kc != 0);
- assert(ks != 0);
- assert(ks % (3 * sizeof(void*)) == 0);
- assert(a != NULL);
- assert(w != NULL);
- assert(c != NULL);
-
- uint8_t* c0 = c;
- uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
- if XNN_UNPREDICTABLE(mr < 2) {
- c1 = c0;
- }
- uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
- if XNN_UNPREDICTABLE(mr <= 2) {
- c2 = c1;
- }
-
- const int32_t vb_zero_point = params->fp32_scalar_lrint.kernel_zero_point;
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- int32_t vacc1x0 = vacc0x0;
- int32_t vacc1x1 = vacc0x1;
- int32_t vacc2x0 = vacc0x0;
- int32_t vacc2x1 = vacc0x1;
- w = (const void*) ((const int32_t*) w + 2);
-
- size_t p = ks;
- do {
- const uint8_t* restrict a0 = a[0];
- assert(a0 != NULL);
- if XNN_UNPREDICTABLE(a0 != zero) {
- a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
- }
- const uint8_t* restrict a1 = a[1];
- assert(a1 != NULL);
- if XNN_UNPREDICTABLE(a1 != zero) {
- a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
- }
- const uint8_t* restrict a2 = a[2];
- assert(a2 != NULL);
- if XNN_UNPREDICTABLE(a2 != zero) {
- a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
- }
- a += 3;
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) (uint32_t) *a0++;
- const int32_t va1 = (int32_t) (uint32_t) *a1++;
- const int32_t va2 = (int32_t) (uint32_t) *a2++;
-
- const int32_t vb0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vb_zero_point;
- const int32_t vb1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vb_zero_point;
- w = (const void*) ((const uint8_t*) w + 2);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
- vacc1x0 += va1 * vb0;
- vacc1x1 += va1 * vb1;
- vacc2x0 += va2 * vb0;
- vacc2x1 += va2 * vb1;
-
- k -= sizeof(uint8_t);
- } while (k != 0);
- p -= 3 * sizeof(void*);
- } while (p != 0);
-
- float vfpacc0x0 = (float) vacc0x0;
- float vfpacc0x1 = (float) vacc0x1;
- float vfpacc1x0 = (float) vacc1x0;
- float vfpacc1x1 = (float) vacc1x1;
- float vfpacc2x0 = (float) vacc2x0;
- float vfpacc2x1 = (float) vacc2x1;
-
- const float vscale = params->fp32_scalar_lrint.scale;
- vfpacc0x0 *= vscale;
- vfpacc0x1 *= vscale;
- vfpacc1x0 *= vscale;
- vfpacc1x1 *= vscale;
- vfpacc2x0 *= vscale;
- vfpacc2x1 *= vscale;
-
- long vrndacc0x0 = lrintf(vfpacc0x0);
- long vrndacc0x1 = lrintf(vfpacc0x1);
- long vrndacc1x0 = lrintf(vfpacc1x0);
- long vrndacc1x1 = lrintf(vfpacc1x1);
- long vrndacc2x0 = lrintf(vfpacc2x0);
- long vrndacc2x1 = lrintf(vfpacc2x1);
-
- const long voutput_min_less_zero_point = params->fp32_scalar_lrint.output_min_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x1;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x1;
- vrndacc2x0 = XNN_UNPREDICTABLE(vrndacc2x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x0;
- vrndacc2x1 = XNN_UNPREDICTABLE(vrndacc2x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x1;
-
- const long voutput_max_less_zero_point = params->fp32_scalar_lrint.output_max_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x1;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x1;
- vrndacc2x0 = XNN_UNPREDICTABLE(vrndacc2x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x0;
- vrndacc2x1 = XNN_UNPREDICTABLE(vrndacc2x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x1;
-
- const int32_t voutput_zero_point = params->fp32_scalar_lrint.output_zero_point;
- int32_t vout0x0 = (int32_t) vrndacc0x0 + voutput_zero_point;
- int32_t vout0x1 = (int32_t) vrndacc0x1 + voutput_zero_point;
- int32_t vout1x0 = (int32_t) vrndacc1x0 + voutput_zero_point;
- int32_t vout1x1 = (int32_t) vrndacc1x1 + voutput_zero_point;
- int32_t vout2x0 = (int32_t) vrndacc2x0 + voutput_zero_point;
- int32_t vout2x1 = (int32_t) vrndacc2x1 + voutput_zero_point;
-
- if XNN_LIKELY(nc >= 2) {
- c2[0] = (uint8_t) vout2x0;
- c2[1] = (uint8_t) vout2x1;
- c1[0] = (uint8_t) vout1x0;
- c1[1] = (uint8_t) vout1x1;
- c0[0] = (uint8_t) vout0x0;
- c0[1] = (uint8_t) vout0x1;
-
- c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
- c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
- c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
-
- a = (const uint8_t**restrict) ((uintptr_t) a - ks);
- nc -= 2;
- } else {
- if (nc & 1) {
- c2[0] = (uint8_t) vout2x0;
- c1[0] = (uint8_t) vout1x0;
- c0[0] = (uint8_t) vout0x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qu8-igemm/gen/3x2-minmax-fp32-scalar-magic.c b/src/qu8-igemm/gen/3x2-minmax-fp32-scalar-lrintf.c
similarity index 79%
copy from src/qu8-igemm/gen/3x2-minmax-fp32-scalar-magic.c
copy to src/qu8-igemm/gen/3x2-minmax-fp32-scalar-lrintf.c
index 66e72a7..55ba647 100644
--- a/src/qu8-igemm/gen/3x2-minmax-fp32-scalar-magic.c
+++ b/src/qu8-igemm/gen/3x2-minmax-fp32-scalar-lrintf.c
@@ -8,14 +8,13 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-
-#include <fp16.h>
+#include <math.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
-void xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_magic(
+void xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
@@ -49,7 +48,7 @@
c2 = c1;
}
- const int32_t vb_zero_point = params->fp32_scalar_magic.kernel_zero_point;
+ const int32_t vb_zero_point = params->fp32_scalar_lrintf.kernel_zero_point;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
@@ -107,7 +106,7 @@
float vfpacc2x0 = (float) vacc2x0;
float vfpacc2x1 = (float) vacc2x1;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_lrintf.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc1x0 *= vscale;
@@ -115,7 +114,7 @@
vfpacc2x0 *= vscale;
vfpacc2x1 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point);
@@ -123,7 +122,7 @@
vfpacc2x0 = math_max_f32(vfpacc2x0, voutput_min_less_zero_point);
vfpacc2x1 = math_max_f32(vfpacc2x1, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point);
@@ -131,21 +130,20 @@
vfpacc2x0 = math_min_f32(vfpacc2x0, voutput_max_less_zero_point);
vfpacc2x1 = math_min_f32(vfpacc2x1, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
- vfpacc0x0 += vmagic_bias;
- vfpacc0x1 += vmagic_bias;
- vfpacc1x0 += vmagic_bias;
- vfpacc1x1 += vmagic_bias;
- vfpacc2x0 += vmagic_bias;
- vfpacc2x1 += vmagic_bias;
+ const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
+ const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
+ const int32_t vrndacc1x0 = (int32_t) lrintf(vfpacc1x0);
+ const int32_t vrndacc1x1 = (int32_t) lrintf(vfpacc1x1);
+ const int32_t vrndacc2x0 = (int32_t) lrintf(vfpacc2x0);
+ const int32_t vrndacc2x1 = (int32_t) lrintf(vfpacc2x1);
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
- int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
- int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
- int32_t vout1x0 = (int32_t) fp32_to_bits(vfpacc1x0) - vmagic_bias_less_output_zero_point;
- int32_t vout1x1 = (int32_t) fp32_to_bits(vfpacc1x1) - vmagic_bias_less_output_zero_point;
- int32_t vout2x0 = (int32_t) fp32_to_bits(vfpacc2x0) - vmagic_bias_less_output_zero_point;
- int32_t vout2x1 = (int32_t) fp32_to_bits(vfpacc2x1) - vmagic_bias_less_output_zero_point;
+ const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
+ int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
+ int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
+ int32_t vout1x0 = vrndacc1x0 + voutput_zero_point;
+ int32_t vout1x1 = vrndacc1x1 + voutput_zero_point;
+ int32_t vout2x0 = vrndacc2x0 + voutput_zero_point;
+ int32_t vout2x1 = vrndacc2x1 + voutput_zero_point;
if XNN_LIKELY(nc >= 2) {
c2[0] = (uint8_t) vout2x0;
diff --git a/src/qu8-igemm/gen/3x4-minmax-fp32-scalar-magic.c b/src/qu8-igemm/gen/3x4-minmax-fp32-scalar-fmagic.c
similarity index 95%
rename from src/qu8-igemm/gen/3x4-minmax-fp32-scalar-magic.c
rename to src/qu8-igemm/gen/3x4-minmax-fp32-scalar-fmagic.c
index a23e20e..b17d503 100644
--- a/src/qu8-igemm/gen/3x4-minmax-fp32-scalar-magic.c
+++ b/src/qu8-igemm/gen/3x4-minmax-fp32-scalar-fmagic.c
@@ -15,7 +15,7 @@
#include <xnnpack/gemm.h>
-void xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_magic(
+void xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic(
size_t mr,
size_t nc,
size_t kc,
@@ -49,7 +49,7 @@
c2 = c1;
}
- const int32_t vb_zero_point = params->fp32_scalar_magic.kernel_zero_point;
+ const int32_t vb_zero_point = params->fp32_scalar_fmagic.kernel_zero_point;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
@@ -127,7 +127,7 @@
float vfpacc2x2 = (float) vacc2x2;
float vfpacc2x3 = (float) vacc2x3;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_fmagic.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc0x2 *= vscale;
@@ -141,7 +141,7 @@
vfpacc2x2 *= vscale;
vfpacc2x3 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
@@ -155,7 +155,7 @@
vfpacc2x2 = math_max_f32(vfpacc2x2, voutput_min_less_zero_point);
vfpacc2x3 = math_max_f32(vfpacc2x3, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
@@ -169,7 +169,7 @@
vfpacc2x2 = math_min_f32(vfpacc2x2, voutput_max_less_zero_point);
vfpacc2x3 = math_min_f32(vfpacc2x3, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
+ const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc0x2 += vmagic_bias;
@@ -183,7 +183,7 @@
vfpacc2x2 += vmagic_bias;
vfpacc2x3 += vmagic_bias;
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
+ const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
int32_t vout0x2 = (int32_t) fp32_to_bits(vfpacc0x2) - vmagic_bias_less_output_zero_point;
diff --git a/src/qu8-igemm/gen/3x4-minmax-fp32-scalar-lrint.c b/src/qu8-igemm/gen/3x4-minmax-fp32-scalar-lrint.c
deleted file mode 100644
index b6bf185..0000000
--- a/src/qu8-igemm/gen/3x4-minmax-fp32-scalar-lrint.c
+++ /dev/null
@@ -1,242 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-igemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-#include <math.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint(
- size_t mr,
- size_t nc,
- size_t kc,
- size_t ks,
- const uint8_t**restrict a,
- const void*restrict w,
- uint8_t*restrict c,
- size_t cm_stride,
- size_t cn_stride,
- size_t a_offset,
- const uint8_t* zero,
- const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 3);
- assert(nc != 0);
- assert(kc != 0);
- assert(ks != 0);
- assert(ks % (3 * sizeof(void*)) == 0);
- assert(a != NULL);
- assert(w != NULL);
- assert(c != NULL);
-
- uint8_t* c0 = c;
- uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
- if XNN_UNPREDICTABLE(mr < 2) {
- c1 = c0;
- }
- uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
- if XNN_UNPREDICTABLE(mr <= 2) {
- c2 = c1;
- }
-
- const int32_t vb_zero_point = params->fp32_scalar_lrint.kernel_zero_point;
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- int32_t vacc0x2 = ((const int32_t*) w)[2];
- int32_t vacc0x3 = ((const int32_t*) w)[3];
- int32_t vacc1x0 = vacc0x0;
- int32_t vacc1x1 = vacc0x1;
- int32_t vacc1x2 = vacc0x2;
- int32_t vacc1x3 = vacc0x3;
- int32_t vacc2x0 = vacc0x0;
- int32_t vacc2x1 = vacc0x1;
- int32_t vacc2x2 = vacc0x2;
- int32_t vacc2x3 = vacc0x3;
- w = (const void*) ((const int32_t*) w + 4);
-
- size_t p = ks;
- do {
- const uint8_t* restrict a0 = a[0];
- assert(a0 != NULL);
- if XNN_UNPREDICTABLE(a0 != zero) {
- a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
- }
- const uint8_t* restrict a1 = a[1];
- assert(a1 != NULL);
- if XNN_UNPREDICTABLE(a1 != zero) {
- a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
- }
- const uint8_t* restrict a2 = a[2];
- assert(a2 != NULL);
- if XNN_UNPREDICTABLE(a2 != zero) {
- a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
- }
- a += 3;
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) (uint32_t) *a0++;
- const int32_t va1 = (int32_t) (uint32_t) *a1++;
- const int32_t va2 = (int32_t) (uint32_t) *a2++;
-
- const int32_t vb0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vb_zero_point;
- const int32_t vb1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vb_zero_point;
- const int32_t vb2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vb_zero_point;
- const int32_t vb3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vb_zero_point;
- w = (const void*) ((const uint8_t*) w + 4);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
- vacc0x2 += va0 * vb2;
- vacc0x3 += va0 * vb3;
- vacc1x0 += va1 * vb0;
- vacc1x1 += va1 * vb1;
- vacc1x2 += va1 * vb2;
- vacc1x3 += va1 * vb3;
- vacc2x0 += va2 * vb0;
- vacc2x1 += va2 * vb1;
- vacc2x2 += va2 * vb2;
- vacc2x3 += va2 * vb3;
-
- k -= sizeof(uint8_t);
- } while (k != 0);
- p -= 3 * sizeof(void*);
- } while (p != 0);
-
- float vfpacc0x0 = (float) vacc0x0;
- float vfpacc0x1 = (float) vacc0x1;
- float vfpacc0x2 = (float) vacc0x2;
- float vfpacc0x3 = (float) vacc0x3;
- float vfpacc1x0 = (float) vacc1x0;
- float vfpacc1x1 = (float) vacc1x1;
- float vfpacc1x2 = (float) vacc1x2;
- float vfpacc1x3 = (float) vacc1x3;
- float vfpacc2x0 = (float) vacc2x0;
- float vfpacc2x1 = (float) vacc2x1;
- float vfpacc2x2 = (float) vacc2x2;
- float vfpacc2x3 = (float) vacc2x3;
-
- const float vscale = params->fp32_scalar_lrint.scale;
- vfpacc0x0 *= vscale;
- vfpacc0x1 *= vscale;
- vfpacc0x2 *= vscale;
- vfpacc0x3 *= vscale;
- vfpacc1x0 *= vscale;
- vfpacc1x1 *= vscale;
- vfpacc1x2 *= vscale;
- vfpacc1x3 *= vscale;
- vfpacc2x0 *= vscale;
- vfpacc2x1 *= vscale;
- vfpacc2x2 *= vscale;
- vfpacc2x3 *= vscale;
-
- long vrndacc0x0 = lrintf(vfpacc0x0);
- long vrndacc0x1 = lrintf(vfpacc0x1);
- long vrndacc0x2 = lrintf(vfpacc0x2);
- long vrndacc0x3 = lrintf(vfpacc0x3);
- long vrndacc1x0 = lrintf(vfpacc1x0);
- long vrndacc1x1 = lrintf(vfpacc1x1);
- long vrndacc1x2 = lrintf(vfpacc1x2);
- long vrndacc1x3 = lrintf(vfpacc1x3);
- long vrndacc2x0 = lrintf(vfpacc2x0);
- long vrndacc2x1 = lrintf(vfpacc2x1);
- long vrndacc2x2 = lrintf(vfpacc2x2);
- long vrndacc2x3 = lrintf(vfpacc2x3);
-
- const long voutput_min_less_zero_point = params->fp32_scalar_lrint.output_min_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x1;
- vrndacc0x2 = XNN_UNPREDICTABLE(vrndacc0x2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x2;
- vrndacc0x3 = XNN_UNPREDICTABLE(vrndacc0x3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x3;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x1;
- vrndacc1x2 = XNN_UNPREDICTABLE(vrndacc1x2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x2;
- vrndacc1x3 = XNN_UNPREDICTABLE(vrndacc1x3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x3;
- vrndacc2x0 = XNN_UNPREDICTABLE(vrndacc2x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x0;
- vrndacc2x1 = XNN_UNPREDICTABLE(vrndacc2x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x1;
- vrndacc2x2 = XNN_UNPREDICTABLE(vrndacc2x2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x2;
- vrndacc2x3 = XNN_UNPREDICTABLE(vrndacc2x3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x3;
-
- const long voutput_max_less_zero_point = params->fp32_scalar_lrint.output_max_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x1;
- vrndacc0x2 = XNN_UNPREDICTABLE(vrndacc0x2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x2;
- vrndacc0x3 = XNN_UNPREDICTABLE(vrndacc0x3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x3;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x1;
- vrndacc1x2 = XNN_UNPREDICTABLE(vrndacc1x2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x2;
- vrndacc1x3 = XNN_UNPREDICTABLE(vrndacc1x3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x3;
- vrndacc2x0 = XNN_UNPREDICTABLE(vrndacc2x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x0;
- vrndacc2x1 = XNN_UNPREDICTABLE(vrndacc2x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x1;
- vrndacc2x2 = XNN_UNPREDICTABLE(vrndacc2x2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x2;
- vrndacc2x3 = XNN_UNPREDICTABLE(vrndacc2x3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x3;
-
- const int32_t voutput_zero_point = params->fp32_scalar_lrint.output_zero_point;
- int32_t vout0x0 = (int32_t) vrndacc0x0 + voutput_zero_point;
- int32_t vout0x1 = (int32_t) vrndacc0x1 + voutput_zero_point;
- int32_t vout0x2 = (int32_t) vrndacc0x2 + voutput_zero_point;
- int32_t vout0x3 = (int32_t) vrndacc0x3 + voutput_zero_point;
- int32_t vout1x0 = (int32_t) vrndacc1x0 + voutput_zero_point;
- int32_t vout1x1 = (int32_t) vrndacc1x1 + voutput_zero_point;
- int32_t vout1x2 = (int32_t) vrndacc1x2 + voutput_zero_point;
- int32_t vout1x3 = (int32_t) vrndacc1x3 + voutput_zero_point;
- int32_t vout2x0 = (int32_t) vrndacc2x0 + voutput_zero_point;
- int32_t vout2x1 = (int32_t) vrndacc2x1 + voutput_zero_point;
- int32_t vout2x2 = (int32_t) vrndacc2x2 + voutput_zero_point;
- int32_t vout2x3 = (int32_t) vrndacc2x3 + voutput_zero_point;
-
- if XNN_LIKELY(nc >= 4) {
- c2[0] = (uint8_t) vout2x0;
- c2[1] = (uint8_t) vout2x1;
- c2[2] = (uint8_t) vout2x2;
- c2[3] = (uint8_t) vout2x3;
- c1[0] = (uint8_t) vout1x0;
- c1[1] = (uint8_t) vout1x1;
- c1[2] = (uint8_t) vout1x2;
- c1[3] = (uint8_t) vout1x3;
- c0[0] = (uint8_t) vout0x0;
- c0[1] = (uint8_t) vout0x1;
- c0[2] = (uint8_t) vout0x2;
- c0[3] = (uint8_t) vout0x3;
-
- c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
- c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
- c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
-
- a = (const uint8_t**restrict) ((uintptr_t) a - ks);
- nc -= 4;
- } else {
- if (nc & 2) {
- c2[0] = (uint8_t) vout2x0;
- c2[1] = (uint8_t) vout2x1;
- vout2x0 = vout2x2;
- c2 += 2;
- c1[0] = (uint8_t) vout1x0;
- c1[1] = (uint8_t) vout1x1;
- vout1x0 = vout1x2;
- c1 += 2;
- c0[0] = (uint8_t) vout0x0;
- c0[1] = (uint8_t) vout0x1;
- vout0x0 = vout0x2;
- c0 += 2;
- }
- if (nc & 1) {
- c2[0] = (uint8_t) vout2x0;
- c1[0] = (uint8_t) vout1x0;
- c0[0] = (uint8_t) vout0x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qu8-igemm/gen/3x4-minmax-fp32-scalar-magic.c b/src/qu8-igemm/gen/3x4-minmax-fp32-scalar-lrintf.c
similarity index 77%
copy from src/qu8-igemm/gen/3x4-minmax-fp32-scalar-magic.c
copy to src/qu8-igemm/gen/3x4-minmax-fp32-scalar-lrintf.c
index a23e20e..f97b2dd 100644
--- a/src/qu8-igemm/gen/3x4-minmax-fp32-scalar-magic.c
+++ b/src/qu8-igemm/gen/3x4-minmax-fp32-scalar-lrintf.c
@@ -8,14 +8,13 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-
-#include <fp16.h>
+#include <math.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
-void xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_magic(
+void xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
@@ -49,7 +48,7 @@
c2 = c1;
}
- const int32_t vb_zero_point = params->fp32_scalar_magic.kernel_zero_point;
+ const int32_t vb_zero_point = params->fp32_scalar_lrintf.kernel_zero_point;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
@@ -127,7 +126,7 @@
float vfpacc2x2 = (float) vacc2x2;
float vfpacc2x3 = (float) vacc2x3;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_lrintf.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc0x2 *= vscale;
@@ -141,7 +140,7 @@
vfpacc2x2 *= vscale;
vfpacc2x3 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
@@ -155,7 +154,7 @@
vfpacc2x2 = math_max_f32(vfpacc2x2, voutput_min_less_zero_point);
vfpacc2x3 = math_max_f32(vfpacc2x3, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
@@ -169,33 +168,32 @@
vfpacc2x2 = math_min_f32(vfpacc2x2, voutput_max_less_zero_point);
vfpacc2x3 = math_min_f32(vfpacc2x3, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
- vfpacc0x0 += vmagic_bias;
- vfpacc0x1 += vmagic_bias;
- vfpacc0x2 += vmagic_bias;
- vfpacc0x3 += vmagic_bias;
- vfpacc1x0 += vmagic_bias;
- vfpacc1x1 += vmagic_bias;
- vfpacc1x2 += vmagic_bias;
- vfpacc1x3 += vmagic_bias;
- vfpacc2x0 += vmagic_bias;
- vfpacc2x1 += vmagic_bias;
- vfpacc2x2 += vmagic_bias;
- vfpacc2x3 += vmagic_bias;
+ const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
+ const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
+ const int32_t vrndacc0x2 = (int32_t) lrintf(vfpacc0x2);
+ const int32_t vrndacc0x3 = (int32_t) lrintf(vfpacc0x3);
+ const int32_t vrndacc1x0 = (int32_t) lrintf(vfpacc1x0);
+ const int32_t vrndacc1x1 = (int32_t) lrintf(vfpacc1x1);
+ const int32_t vrndacc1x2 = (int32_t) lrintf(vfpacc1x2);
+ const int32_t vrndacc1x3 = (int32_t) lrintf(vfpacc1x3);
+ const int32_t vrndacc2x0 = (int32_t) lrintf(vfpacc2x0);
+ const int32_t vrndacc2x1 = (int32_t) lrintf(vfpacc2x1);
+ const int32_t vrndacc2x2 = (int32_t) lrintf(vfpacc2x2);
+ const int32_t vrndacc2x3 = (int32_t) lrintf(vfpacc2x3);
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
- int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
- int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
- int32_t vout0x2 = (int32_t) fp32_to_bits(vfpacc0x2) - vmagic_bias_less_output_zero_point;
- int32_t vout0x3 = (int32_t) fp32_to_bits(vfpacc0x3) - vmagic_bias_less_output_zero_point;
- int32_t vout1x0 = (int32_t) fp32_to_bits(vfpacc1x0) - vmagic_bias_less_output_zero_point;
- int32_t vout1x1 = (int32_t) fp32_to_bits(vfpacc1x1) - vmagic_bias_less_output_zero_point;
- int32_t vout1x2 = (int32_t) fp32_to_bits(vfpacc1x2) - vmagic_bias_less_output_zero_point;
- int32_t vout1x3 = (int32_t) fp32_to_bits(vfpacc1x3) - vmagic_bias_less_output_zero_point;
- int32_t vout2x0 = (int32_t) fp32_to_bits(vfpacc2x0) - vmagic_bias_less_output_zero_point;
- int32_t vout2x1 = (int32_t) fp32_to_bits(vfpacc2x1) - vmagic_bias_less_output_zero_point;
- int32_t vout2x2 = (int32_t) fp32_to_bits(vfpacc2x2) - vmagic_bias_less_output_zero_point;
- int32_t vout2x3 = (int32_t) fp32_to_bits(vfpacc2x3) - vmagic_bias_less_output_zero_point;
+ const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
+ int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
+ int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
+ int32_t vout0x2 = vrndacc0x2 + voutput_zero_point;
+ int32_t vout0x3 = vrndacc0x3 + voutput_zero_point;
+ int32_t vout1x0 = vrndacc1x0 + voutput_zero_point;
+ int32_t vout1x1 = vrndacc1x1 + voutput_zero_point;
+ int32_t vout1x2 = vrndacc1x2 + voutput_zero_point;
+ int32_t vout1x3 = vrndacc1x3 + voutput_zero_point;
+ int32_t vout2x0 = vrndacc2x0 + voutput_zero_point;
+ int32_t vout2x1 = vrndacc2x1 + voutput_zero_point;
+ int32_t vout2x2 = vrndacc2x2 + voutput_zero_point;
+ int32_t vout2x3 = vrndacc2x3 + voutput_zero_point;
if XNN_LIKELY(nc >= 4) {
c2[0] = (uint8_t) vout2x0;
diff --git a/src/qu8-igemm/gen/4x2-minmax-fp32-scalar-magic.c b/src/qu8-igemm/gen/4x2-minmax-fp32-scalar-fmagic.c
similarity index 94%
rename from src/qu8-igemm/gen/4x2-minmax-fp32-scalar-magic.c
rename to src/qu8-igemm/gen/4x2-minmax-fp32-scalar-fmagic.c
index e952273..0296568 100644
--- a/src/qu8-igemm/gen/4x2-minmax-fp32-scalar-magic.c
+++ b/src/qu8-igemm/gen/4x2-minmax-fp32-scalar-fmagic.c
@@ -15,7 +15,7 @@
#include <xnnpack/gemm.h>
-void xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_magic(
+void xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic(
size_t mr,
size_t nc,
size_t kc,
@@ -53,7 +53,7 @@
c3 = c2;
}
- const int32_t vb_zero_point = params->fp32_scalar_magic.kernel_zero_point;
+ const int32_t vb_zero_point = params->fp32_scalar_fmagic.kernel_zero_point;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
@@ -123,7 +123,7 @@
float vfpacc3x0 = (float) vacc3x0;
float vfpacc3x1 = (float) vacc3x1;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_fmagic.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc1x0 *= vscale;
@@ -133,7 +133,7 @@
vfpacc3x0 *= vscale;
vfpacc3x1 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point);
@@ -143,7 +143,7 @@
vfpacc3x0 = math_max_f32(vfpacc3x0, voutput_min_less_zero_point);
vfpacc3x1 = math_max_f32(vfpacc3x1, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point);
@@ -153,7 +153,7 @@
vfpacc3x0 = math_min_f32(vfpacc3x0, voutput_max_less_zero_point);
vfpacc3x1 = math_min_f32(vfpacc3x1, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
+ const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc1x0 += vmagic_bias;
@@ -163,7 +163,7 @@
vfpacc3x0 += vmagic_bias;
vfpacc3x1 += vmagic_bias;
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
+ const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
int32_t vout1x0 = (int32_t) fp32_to_bits(vfpacc1x0) - vmagic_bias_less_output_zero_point;
diff --git a/src/qu8-igemm/gen/4x2-minmax-fp32-scalar-lrint.c b/src/qu8-igemm/gen/4x2-minmax-fp32-scalar-lrint.c
deleted file mode 100644
index 43a417f..0000000
--- a/src/qu8-igemm/gen/4x2-minmax-fp32-scalar-lrint.c
+++ /dev/null
@@ -1,202 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-igemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-#include <math.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint(
- size_t mr,
- size_t nc,
- size_t kc,
- size_t ks,
- const uint8_t**restrict a,
- const void*restrict w,
- uint8_t*restrict c,
- size_t cm_stride,
- size_t cn_stride,
- size_t a_offset,
- const uint8_t* zero,
- const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 4);
- assert(nc != 0);
- assert(kc != 0);
- assert(ks != 0);
- assert(ks % (4 * sizeof(void*)) == 0);
- assert(a != NULL);
- assert(w != NULL);
- assert(c != NULL);
-
- uint8_t* c0 = c;
- uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
- if XNN_UNPREDICTABLE(mr < 2) {
- c1 = c0;
- }
- uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
- if XNN_UNPREDICTABLE(mr <= 2) {
- c2 = c1;
- }
- uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
- if XNN_UNPREDICTABLE(mr != 4) {
- c3 = c2;
- }
-
- const int32_t vb_zero_point = params->fp32_scalar_lrint.kernel_zero_point;
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- int32_t vacc1x0 = vacc0x0;
- int32_t vacc1x1 = vacc0x1;
- int32_t vacc2x0 = vacc0x0;
- int32_t vacc2x1 = vacc0x1;
- int32_t vacc3x0 = vacc0x0;
- int32_t vacc3x1 = vacc0x1;
- w = (const void*) ((const int32_t*) w + 2);
-
- size_t p = ks;
- do {
- const uint8_t* restrict a0 = a[0];
- assert(a0 != NULL);
- if XNN_UNPREDICTABLE(a0 != zero) {
- a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
- }
- const uint8_t* restrict a1 = a[1];
- assert(a1 != NULL);
- if XNN_UNPREDICTABLE(a1 != zero) {
- a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
- }
- const uint8_t* restrict a2 = a[2];
- assert(a2 != NULL);
- if XNN_UNPREDICTABLE(a2 != zero) {
- a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
- }
- const uint8_t* restrict a3 = a[3];
- assert(a3 != NULL);
- if XNN_UNPREDICTABLE(a3 != zero) {
- a3 = (const uint8_t*) ((uintptr_t) a3 + a_offset);
- }
- a += 4;
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) (uint32_t) *a0++;
- const int32_t va1 = (int32_t) (uint32_t) *a1++;
- const int32_t va2 = (int32_t) (uint32_t) *a2++;
- const int32_t va3 = (int32_t) (uint32_t) *a3++;
-
- const int32_t vb0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vb_zero_point;
- const int32_t vb1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vb_zero_point;
- w = (const void*) ((const uint8_t*) w + 2);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
- vacc1x0 += va1 * vb0;
- vacc1x1 += va1 * vb1;
- vacc2x0 += va2 * vb0;
- vacc2x1 += va2 * vb1;
- vacc3x0 += va3 * vb0;
- vacc3x1 += va3 * vb1;
-
- k -= sizeof(uint8_t);
- } while (k != 0);
- p -= 4 * sizeof(void*);
- } while (p != 0);
-
- float vfpacc0x0 = (float) vacc0x0;
- float vfpacc0x1 = (float) vacc0x1;
- float vfpacc1x0 = (float) vacc1x0;
- float vfpacc1x1 = (float) vacc1x1;
- float vfpacc2x0 = (float) vacc2x0;
- float vfpacc2x1 = (float) vacc2x1;
- float vfpacc3x0 = (float) vacc3x0;
- float vfpacc3x1 = (float) vacc3x1;
-
- const float vscale = params->fp32_scalar_lrint.scale;
- vfpacc0x0 *= vscale;
- vfpacc0x1 *= vscale;
- vfpacc1x0 *= vscale;
- vfpacc1x1 *= vscale;
- vfpacc2x0 *= vscale;
- vfpacc2x1 *= vscale;
- vfpacc3x0 *= vscale;
- vfpacc3x1 *= vscale;
-
- long vrndacc0x0 = lrintf(vfpacc0x0);
- long vrndacc0x1 = lrintf(vfpacc0x1);
- long vrndacc1x0 = lrintf(vfpacc1x0);
- long vrndacc1x1 = lrintf(vfpacc1x1);
- long vrndacc2x0 = lrintf(vfpacc2x0);
- long vrndacc2x1 = lrintf(vfpacc2x1);
- long vrndacc3x0 = lrintf(vfpacc3x0);
- long vrndacc3x1 = lrintf(vfpacc3x1);
-
- const long voutput_min_less_zero_point = params->fp32_scalar_lrint.output_min_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x1;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x1;
- vrndacc2x0 = XNN_UNPREDICTABLE(vrndacc2x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x0;
- vrndacc2x1 = XNN_UNPREDICTABLE(vrndacc2x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x1;
- vrndacc3x0 = XNN_UNPREDICTABLE(vrndacc3x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc3x0;
- vrndacc3x1 = XNN_UNPREDICTABLE(vrndacc3x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc3x1;
-
- const long voutput_max_less_zero_point = params->fp32_scalar_lrint.output_max_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x1;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x1;
- vrndacc2x0 = XNN_UNPREDICTABLE(vrndacc2x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x0;
- vrndacc2x1 = XNN_UNPREDICTABLE(vrndacc2x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x1;
- vrndacc3x0 = XNN_UNPREDICTABLE(vrndacc3x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc3x0;
- vrndacc3x1 = XNN_UNPREDICTABLE(vrndacc3x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc3x1;
-
- const int32_t voutput_zero_point = params->fp32_scalar_lrint.output_zero_point;
- int32_t vout0x0 = (int32_t) vrndacc0x0 + voutput_zero_point;
- int32_t vout0x1 = (int32_t) vrndacc0x1 + voutput_zero_point;
- int32_t vout1x0 = (int32_t) vrndacc1x0 + voutput_zero_point;
- int32_t vout1x1 = (int32_t) vrndacc1x1 + voutput_zero_point;
- int32_t vout2x0 = (int32_t) vrndacc2x0 + voutput_zero_point;
- int32_t vout2x1 = (int32_t) vrndacc2x1 + voutput_zero_point;
- int32_t vout3x0 = (int32_t) vrndacc3x0 + voutput_zero_point;
- int32_t vout3x1 = (int32_t) vrndacc3x1 + voutput_zero_point;
-
- if XNN_LIKELY(nc >= 2) {
- c3[0] = (uint8_t) vout3x0;
- c3[1] = (uint8_t) vout3x1;
- c2[0] = (uint8_t) vout2x0;
- c2[1] = (uint8_t) vout2x1;
- c1[0] = (uint8_t) vout1x0;
- c1[1] = (uint8_t) vout1x1;
- c0[0] = (uint8_t) vout0x0;
- c0[1] = (uint8_t) vout0x1;
-
- c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);
- c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
- c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
- c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
-
- a = (const uint8_t**restrict) ((uintptr_t) a - ks);
- nc -= 2;
- } else {
- if (nc & 1) {
- c3[0] = (uint8_t) vout3x0;
- c2[0] = (uint8_t) vout2x0;
- c1[0] = (uint8_t) vout1x0;
- c0[0] = (uint8_t) vout0x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qu8-igemm/gen/4x2-minmax-fp32-scalar-magic.c b/src/qu8-igemm/gen/4x2-minmax-fp32-scalar-lrintf.c
similarity index 79%
copy from src/qu8-igemm/gen/4x2-minmax-fp32-scalar-magic.c
copy to src/qu8-igemm/gen/4x2-minmax-fp32-scalar-lrintf.c
index e952273..4689e43 100644
--- a/src/qu8-igemm/gen/4x2-minmax-fp32-scalar-magic.c
+++ b/src/qu8-igemm/gen/4x2-minmax-fp32-scalar-lrintf.c
@@ -8,14 +8,13 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-
-#include <fp16.h>
+#include <math.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
-void xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_magic(
+void xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
@@ -53,7 +52,7 @@
c3 = c2;
}
- const int32_t vb_zero_point = params->fp32_scalar_magic.kernel_zero_point;
+ const int32_t vb_zero_point = params->fp32_scalar_lrintf.kernel_zero_point;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
@@ -123,7 +122,7 @@
float vfpacc3x0 = (float) vacc3x0;
float vfpacc3x1 = (float) vacc3x1;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_lrintf.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc1x0 *= vscale;
@@ -133,7 +132,7 @@
vfpacc3x0 *= vscale;
vfpacc3x1 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point);
@@ -143,7 +142,7 @@
vfpacc3x0 = math_max_f32(vfpacc3x0, voutput_min_less_zero_point);
vfpacc3x1 = math_max_f32(vfpacc3x1, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point);
@@ -153,25 +152,24 @@
vfpacc3x0 = math_min_f32(vfpacc3x0, voutput_max_less_zero_point);
vfpacc3x1 = math_min_f32(vfpacc3x1, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
- vfpacc0x0 += vmagic_bias;
- vfpacc0x1 += vmagic_bias;
- vfpacc1x0 += vmagic_bias;
- vfpacc1x1 += vmagic_bias;
- vfpacc2x0 += vmagic_bias;
- vfpacc2x1 += vmagic_bias;
- vfpacc3x0 += vmagic_bias;
- vfpacc3x1 += vmagic_bias;
+ const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
+ const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
+ const int32_t vrndacc1x0 = (int32_t) lrintf(vfpacc1x0);
+ const int32_t vrndacc1x1 = (int32_t) lrintf(vfpacc1x1);
+ const int32_t vrndacc2x0 = (int32_t) lrintf(vfpacc2x0);
+ const int32_t vrndacc2x1 = (int32_t) lrintf(vfpacc2x1);
+ const int32_t vrndacc3x0 = (int32_t) lrintf(vfpacc3x0);
+ const int32_t vrndacc3x1 = (int32_t) lrintf(vfpacc3x1);
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
- int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
- int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
- int32_t vout1x0 = (int32_t) fp32_to_bits(vfpacc1x0) - vmagic_bias_less_output_zero_point;
- int32_t vout1x1 = (int32_t) fp32_to_bits(vfpacc1x1) - vmagic_bias_less_output_zero_point;
- int32_t vout2x0 = (int32_t) fp32_to_bits(vfpacc2x0) - vmagic_bias_less_output_zero_point;
- int32_t vout2x1 = (int32_t) fp32_to_bits(vfpacc2x1) - vmagic_bias_less_output_zero_point;
- int32_t vout3x0 = (int32_t) fp32_to_bits(vfpacc3x0) - vmagic_bias_less_output_zero_point;
- int32_t vout3x1 = (int32_t) fp32_to_bits(vfpacc3x1) - vmagic_bias_less_output_zero_point;
+ const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
+ int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
+ int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
+ int32_t vout1x0 = vrndacc1x0 + voutput_zero_point;
+ int32_t vout1x1 = vrndacc1x1 + voutput_zero_point;
+ int32_t vout2x0 = vrndacc2x0 + voutput_zero_point;
+ int32_t vout2x1 = vrndacc2x1 + voutput_zero_point;
+ int32_t vout3x0 = vrndacc3x0 + voutput_zero_point;
+ int32_t vout3x1 = vrndacc3x1 + voutput_zero_point;
if XNN_LIKELY(nc >= 2) {
c3[0] = (uint8_t) vout3x0;
diff --git a/src/qu8-igemm/gen/4x4-minmax-fp32-scalar-magic.c b/src/qu8-igemm/gen/4x4-minmax-fp32-scalar-fmagic.c
similarity index 96%
rename from src/qu8-igemm/gen/4x4-minmax-fp32-scalar-magic.c
rename to src/qu8-igemm/gen/4x4-minmax-fp32-scalar-fmagic.c
index 54c6322..2aed0cb 100644
--- a/src/qu8-igemm/gen/4x4-minmax-fp32-scalar-magic.c
+++ b/src/qu8-igemm/gen/4x4-minmax-fp32-scalar-fmagic.c
@@ -15,7 +15,7 @@
#include <xnnpack/gemm.h>
-void xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_magic(
+void xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic(
size_t mr,
size_t nc,
size_t kc,
@@ -53,7 +53,7 @@
c3 = c2;
}
- const int32_t vb_zero_point = params->fp32_scalar_magic.kernel_zero_point;
+ const int32_t vb_zero_point = params->fp32_scalar_fmagic.kernel_zero_point;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
@@ -149,7 +149,7 @@
float vfpacc3x2 = (float) vacc3x2;
float vfpacc3x3 = (float) vacc3x3;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_fmagic.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc0x2 *= vscale;
@@ -167,7 +167,7 @@
vfpacc3x2 *= vscale;
vfpacc3x3 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
@@ -185,7 +185,7 @@
vfpacc3x2 = math_max_f32(vfpacc3x2, voutput_min_less_zero_point);
vfpacc3x3 = math_max_f32(vfpacc3x3, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
@@ -203,7 +203,7 @@
vfpacc3x2 = math_min_f32(vfpacc3x2, voutput_max_less_zero_point);
vfpacc3x3 = math_min_f32(vfpacc3x3, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
+ const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc0x2 += vmagic_bias;
@@ -221,7 +221,7 @@
vfpacc3x2 += vmagic_bias;
vfpacc3x3 += vmagic_bias;
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
+ const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
int32_t vout0x2 = (int32_t) fp32_to_bits(vfpacc0x2) - vmagic_bias_less_output_zero_point;
diff --git a/src/qu8-igemm/gen/4x4-minmax-fp32-scalar-lrint.c b/src/qu8-igemm/gen/4x4-minmax-fp32-scalar-lrint.c
deleted file mode 100644
index 0fb2cfa..0000000
--- a/src/qu8-igemm/gen/4x4-minmax-fp32-scalar-lrint.c
+++ /dev/null
@@ -1,294 +0,0 @@
-// Auto-generated file. Do not edit!
-// Template: src/qs8-igemm/scalar.c.in
-// Generator: tools/xngen
-//
-// Copyright 2021 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-#include <math.h>
-
-#include <xnnpack/math.h>
-#include <xnnpack/gemm.h>
-
-
-void xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint(
- size_t mr,
- size_t nc,
- size_t kc,
- size_t ks,
- const uint8_t**restrict a,
- const void*restrict w,
- uint8_t*restrict c,
- size_t cm_stride,
- size_t cn_stride,
- size_t a_offset,
- const uint8_t* zero,
- const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
- assert(mr != 0);
- assert(mr <= 4);
- assert(nc != 0);
- assert(kc != 0);
- assert(ks != 0);
- assert(ks % (4 * sizeof(void*)) == 0);
- assert(a != NULL);
- assert(w != NULL);
- assert(c != NULL);
-
- uint8_t* c0 = c;
- uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
- if XNN_UNPREDICTABLE(mr < 2) {
- c1 = c0;
- }
- uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
- if XNN_UNPREDICTABLE(mr <= 2) {
- c2 = c1;
- }
- uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
- if XNN_UNPREDICTABLE(mr != 4) {
- c3 = c2;
- }
-
- const int32_t vb_zero_point = params->fp32_scalar_lrint.kernel_zero_point;
- do {
- int32_t vacc0x0 = ((const int32_t*) w)[0];
- int32_t vacc0x1 = ((const int32_t*) w)[1];
- int32_t vacc0x2 = ((const int32_t*) w)[2];
- int32_t vacc0x3 = ((const int32_t*) w)[3];
- int32_t vacc1x0 = vacc0x0;
- int32_t vacc1x1 = vacc0x1;
- int32_t vacc1x2 = vacc0x2;
- int32_t vacc1x3 = vacc0x3;
- int32_t vacc2x0 = vacc0x0;
- int32_t vacc2x1 = vacc0x1;
- int32_t vacc2x2 = vacc0x2;
- int32_t vacc2x3 = vacc0x3;
- int32_t vacc3x0 = vacc0x0;
- int32_t vacc3x1 = vacc0x1;
- int32_t vacc3x2 = vacc0x2;
- int32_t vacc3x3 = vacc0x3;
- w = (const void*) ((const int32_t*) w + 4);
-
- size_t p = ks;
- do {
- const uint8_t* restrict a0 = a[0];
- assert(a0 != NULL);
- if XNN_UNPREDICTABLE(a0 != zero) {
- a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
- }
- const uint8_t* restrict a1 = a[1];
- assert(a1 != NULL);
- if XNN_UNPREDICTABLE(a1 != zero) {
- a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
- }
- const uint8_t* restrict a2 = a[2];
- assert(a2 != NULL);
- if XNN_UNPREDICTABLE(a2 != zero) {
- a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
- }
- const uint8_t* restrict a3 = a[3];
- assert(a3 != NULL);
- if XNN_UNPREDICTABLE(a3 != zero) {
- a3 = (const uint8_t*) ((uintptr_t) a3 + a_offset);
- }
- a += 4;
-
- size_t k = kc;
- do {
- const int32_t va0 = (int32_t) (uint32_t) *a0++;
- const int32_t va1 = (int32_t) (uint32_t) *a1++;
- const int32_t va2 = (int32_t) (uint32_t) *a2++;
- const int32_t va3 = (int32_t) (uint32_t) *a3++;
-
- const int32_t vb0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vb_zero_point;
- const int32_t vb1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vb_zero_point;
- const int32_t vb2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vb_zero_point;
- const int32_t vb3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vb_zero_point;
- w = (const void*) ((const uint8_t*) w + 4);
-
- vacc0x0 += va0 * vb0;
- vacc0x1 += va0 * vb1;
- vacc0x2 += va0 * vb2;
- vacc0x3 += va0 * vb3;
- vacc1x0 += va1 * vb0;
- vacc1x1 += va1 * vb1;
- vacc1x2 += va1 * vb2;
- vacc1x3 += va1 * vb3;
- vacc2x0 += va2 * vb0;
- vacc2x1 += va2 * vb1;
- vacc2x2 += va2 * vb2;
- vacc2x3 += va2 * vb3;
- vacc3x0 += va3 * vb0;
- vacc3x1 += va3 * vb1;
- vacc3x2 += va3 * vb2;
- vacc3x3 += va3 * vb3;
-
- k -= sizeof(uint8_t);
- } while (k != 0);
- p -= 4 * sizeof(void*);
- } while (p != 0);
-
- float vfpacc0x0 = (float) vacc0x0;
- float vfpacc0x1 = (float) vacc0x1;
- float vfpacc0x2 = (float) vacc0x2;
- float vfpacc0x3 = (float) vacc0x3;
- float vfpacc1x0 = (float) vacc1x0;
- float vfpacc1x1 = (float) vacc1x1;
- float vfpacc1x2 = (float) vacc1x2;
- float vfpacc1x3 = (float) vacc1x3;
- float vfpacc2x0 = (float) vacc2x0;
- float vfpacc2x1 = (float) vacc2x1;
- float vfpacc2x2 = (float) vacc2x2;
- float vfpacc2x3 = (float) vacc2x3;
- float vfpacc3x0 = (float) vacc3x0;
- float vfpacc3x1 = (float) vacc3x1;
- float vfpacc3x2 = (float) vacc3x2;
- float vfpacc3x3 = (float) vacc3x3;
-
- const float vscale = params->fp32_scalar_lrint.scale;
- vfpacc0x0 *= vscale;
- vfpacc0x1 *= vscale;
- vfpacc0x2 *= vscale;
- vfpacc0x3 *= vscale;
- vfpacc1x0 *= vscale;
- vfpacc1x1 *= vscale;
- vfpacc1x2 *= vscale;
- vfpacc1x3 *= vscale;
- vfpacc2x0 *= vscale;
- vfpacc2x1 *= vscale;
- vfpacc2x2 *= vscale;
- vfpacc2x3 *= vscale;
- vfpacc3x0 *= vscale;
- vfpacc3x1 *= vscale;
- vfpacc3x2 *= vscale;
- vfpacc3x3 *= vscale;
-
- long vrndacc0x0 = lrintf(vfpacc0x0);
- long vrndacc0x1 = lrintf(vfpacc0x1);
- long vrndacc0x2 = lrintf(vfpacc0x2);
- long vrndacc0x3 = lrintf(vfpacc0x3);
- long vrndacc1x0 = lrintf(vfpacc1x0);
- long vrndacc1x1 = lrintf(vfpacc1x1);
- long vrndacc1x2 = lrintf(vfpacc1x2);
- long vrndacc1x3 = lrintf(vfpacc1x3);
- long vrndacc2x0 = lrintf(vfpacc2x0);
- long vrndacc2x1 = lrintf(vfpacc2x1);
- long vrndacc2x2 = lrintf(vfpacc2x2);
- long vrndacc2x3 = lrintf(vfpacc2x3);
- long vrndacc3x0 = lrintf(vfpacc3x0);
- long vrndacc3x1 = lrintf(vfpacc3x1);
- long vrndacc3x2 = lrintf(vfpacc3x2);
- long vrndacc3x3 = lrintf(vfpacc3x3);
-
- const long voutput_min_less_zero_point = params->fp32_scalar_lrint.output_min_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x1;
- vrndacc0x2 = XNN_UNPREDICTABLE(vrndacc0x2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x2;
- vrndacc0x3 = XNN_UNPREDICTABLE(vrndacc0x3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc0x3;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x1;
- vrndacc1x2 = XNN_UNPREDICTABLE(vrndacc1x2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x2;
- vrndacc1x3 = XNN_UNPREDICTABLE(vrndacc1x3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc1x3;
- vrndacc2x0 = XNN_UNPREDICTABLE(vrndacc2x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x0;
- vrndacc2x1 = XNN_UNPREDICTABLE(vrndacc2x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x1;
- vrndacc2x2 = XNN_UNPREDICTABLE(vrndacc2x2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x2;
- vrndacc2x3 = XNN_UNPREDICTABLE(vrndacc2x3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc2x3;
- vrndacc3x0 = XNN_UNPREDICTABLE(vrndacc3x0 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc3x0;
- vrndacc3x1 = XNN_UNPREDICTABLE(vrndacc3x1 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc3x1;
- vrndacc3x2 = XNN_UNPREDICTABLE(vrndacc3x2 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc3x2;
- vrndacc3x3 = XNN_UNPREDICTABLE(vrndacc3x3 < voutput_min_less_zero_point) ? voutput_min_less_zero_point : vrndacc3x3;
-
- const long voutput_max_less_zero_point = params->fp32_scalar_lrint.output_max_less_zero_point;
- vrndacc0x0 = XNN_UNPREDICTABLE(vrndacc0x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x0;
- vrndacc0x1 = XNN_UNPREDICTABLE(vrndacc0x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x1;
- vrndacc0x2 = XNN_UNPREDICTABLE(vrndacc0x2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x2;
- vrndacc0x3 = XNN_UNPREDICTABLE(vrndacc0x3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc0x3;
- vrndacc1x0 = XNN_UNPREDICTABLE(vrndacc1x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x0;
- vrndacc1x1 = XNN_UNPREDICTABLE(vrndacc1x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x1;
- vrndacc1x2 = XNN_UNPREDICTABLE(vrndacc1x2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x2;
- vrndacc1x3 = XNN_UNPREDICTABLE(vrndacc1x3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc1x3;
- vrndacc2x0 = XNN_UNPREDICTABLE(vrndacc2x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x0;
- vrndacc2x1 = XNN_UNPREDICTABLE(vrndacc2x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x1;
- vrndacc2x2 = XNN_UNPREDICTABLE(vrndacc2x2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x2;
- vrndacc2x3 = XNN_UNPREDICTABLE(vrndacc2x3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc2x3;
- vrndacc3x0 = XNN_UNPREDICTABLE(vrndacc3x0 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc3x0;
- vrndacc3x1 = XNN_UNPREDICTABLE(vrndacc3x1 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc3x1;
- vrndacc3x2 = XNN_UNPREDICTABLE(vrndacc3x2 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc3x2;
- vrndacc3x3 = XNN_UNPREDICTABLE(vrndacc3x3 > voutput_max_less_zero_point) ? voutput_max_less_zero_point : vrndacc3x3;
-
- const int32_t voutput_zero_point = params->fp32_scalar_lrint.output_zero_point;
- int32_t vout0x0 = (int32_t) vrndacc0x0 + voutput_zero_point;
- int32_t vout0x1 = (int32_t) vrndacc0x1 + voutput_zero_point;
- int32_t vout0x2 = (int32_t) vrndacc0x2 + voutput_zero_point;
- int32_t vout0x3 = (int32_t) vrndacc0x3 + voutput_zero_point;
- int32_t vout1x0 = (int32_t) vrndacc1x0 + voutput_zero_point;
- int32_t vout1x1 = (int32_t) vrndacc1x1 + voutput_zero_point;
- int32_t vout1x2 = (int32_t) vrndacc1x2 + voutput_zero_point;
- int32_t vout1x3 = (int32_t) vrndacc1x3 + voutput_zero_point;
- int32_t vout2x0 = (int32_t) vrndacc2x0 + voutput_zero_point;
- int32_t vout2x1 = (int32_t) vrndacc2x1 + voutput_zero_point;
- int32_t vout2x2 = (int32_t) vrndacc2x2 + voutput_zero_point;
- int32_t vout2x3 = (int32_t) vrndacc2x3 + voutput_zero_point;
- int32_t vout3x0 = (int32_t) vrndacc3x0 + voutput_zero_point;
- int32_t vout3x1 = (int32_t) vrndacc3x1 + voutput_zero_point;
- int32_t vout3x2 = (int32_t) vrndacc3x2 + voutput_zero_point;
- int32_t vout3x3 = (int32_t) vrndacc3x3 + voutput_zero_point;
-
- if XNN_LIKELY(nc >= 4) {
- c3[0] = (uint8_t) vout3x0;
- c3[1] = (uint8_t) vout3x1;
- c3[2] = (uint8_t) vout3x2;
- c3[3] = (uint8_t) vout3x3;
- c2[0] = (uint8_t) vout2x0;
- c2[1] = (uint8_t) vout2x1;
- c2[2] = (uint8_t) vout2x2;
- c2[3] = (uint8_t) vout2x3;
- c1[0] = (uint8_t) vout1x0;
- c1[1] = (uint8_t) vout1x1;
- c1[2] = (uint8_t) vout1x2;
- c1[3] = (uint8_t) vout1x3;
- c0[0] = (uint8_t) vout0x0;
- c0[1] = (uint8_t) vout0x1;
- c0[2] = (uint8_t) vout0x2;
- c0[3] = (uint8_t) vout0x3;
-
- c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);
- c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
- c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
- c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
-
- a = (const uint8_t**restrict) ((uintptr_t) a - ks);
- nc -= 4;
- } else {
- if (nc & 2) {
- c3[0] = (uint8_t) vout3x0;
- c3[1] = (uint8_t) vout3x1;
- vout3x0 = vout3x2;
- c3 += 2;
- c2[0] = (uint8_t) vout2x0;
- c2[1] = (uint8_t) vout2x1;
- vout2x0 = vout2x2;
- c2 += 2;
- c1[0] = (uint8_t) vout1x0;
- c1[1] = (uint8_t) vout1x1;
- vout1x0 = vout1x2;
- c1 += 2;
- c0[0] = (uint8_t) vout0x0;
- c0[1] = (uint8_t) vout0x1;
- vout0x0 = vout0x2;
- c0 += 2;
- }
- if (nc & 1) {
- c3[0] = (uint8_t) vout3x0;
- c2[0] = (uint8_t) vout2x0;
- c1[0] = (uint8_t) vout1x0;
- c0[0] = (uint8_t) vout0x0;
- }
-
- nc = 0;
- }
- } while (nc != 0);
-}
diff --git a/src/qu8-igemm/gen/4x4-minmax-fp32-scalar-magic.c b/src/qu8-igemm/gen/4x4-minmax-fp32-scalar-lrintf.c
similarity index 77%
copy from src/qu8-igemm/gen/4x4-minmax-fp32-scalar-magic.c
copy to src/qu8-igemm/gen/4x4-minmax-fp32-scalar-lrintf.c
index 54c6322..ed2439b 100644
--- a/src/qu8-igemm/gen/4x4-minmax-fp32-scalar-magic.c
+++ b/src/qu8-igemm/gen/4x4-minmax-fp32-scalar-lrintf.c
@@ -8,14 +8,13 @@
// LICENSE file in the root directory of this source tree.
#include <assert.h>
-
-#include <fp16.h>
+#include <math.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
-void xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_magic(
+void xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
@@ -53,7 +52,7 @@
c3 = c2;
}
- const int32_t vb_zero_point = params->fp32_scalar_magic.kernel_zero_point;
+ const int32_t vb_zero_point = params->fp32_scalar_lrintf.kernel_zero_point;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
@@ -149,7 +148,7 @@
float vfpacc3x2 = (float) vacc3x2;
float vfpacc3x3 = (float) vacc3x3;
- const float vscale = params->fp32_scalar_magic.scale;
+ const float vscale = params->fp32_scalar_lrintf.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc0x2 *= vscale;
@@ -167,7 +166,7 @@
vfpacc3x2 *= vscale;
vfpacc3x3 *= vscale;
- const float voutput_min_less_zero_point = params->fp32_scalar_magic.output_min_less_zero_point;
+ const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
@@ -185,7 +184,7 @@
vfpacc3x2 = math_max_f32(vfpacc3x2, voutput_min_less_zero_point);
vfpacc3x3 = math_max_f32(vfpacc3x3, voutput_min_less_zero_point);
- const float voutput_max_less_zero_point = params->fp32_scalar_magic.output_max_less_zero_point;
+ const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
@@ -203,41 +202,40 @@
vfpacc3x2 = math_min_f32(vfpacc3x2, voutput_max_less_zero_point);
vfpacc3x3 = math_min_f32(vfpacc3x3, voutput_max_less_zero_point);
- const float vmagic_bias = params->fp32_scalar_magic.magic_bias;
- vfpacc0x0 += vmagic_bias;
- vfpacc0x1 += vmagic_bias;
- vfpacc0x2 += vmagic_bias;
- vfpacc0x3 += vmagic_bias;
- vfpacc1x0 += vmagic_bias;
- vfpacc1x1 += vmagic_bias;
- vfpacc1x2 += vmagic_bias;
- vfpacc1x3 += vmagic_bias;
- vfpacc2x0 += vmagic_bias;
- vfpacc2x1 += vmagic_bias;
- vfpacc2x2 += vmagic_bias;
- vfpacc2x3 += vmagic_bias;
- vfpacc3x0 += vmagic_bias;
- vfpacc3x1 += vmagic_bias;
- vfpacc3x2 += vmagic_bias;
- vfpacc3x3 += vmagic_bias;
+ const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
+ const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
+ const int32_t vrndacc0x2 = (int32_t) lrintf(vfpacc0x2);
+ const int32_t vrndacc0x3 = (int32_t) lrintf(vfpacc0x3);
+ const int32_t vrndacc1x0 = (int32_t) lrintf(vfpacc1x0);
+ const int32_t vrndacc1x1 = (int32_t) lrintf(vfpacc1x1);
+ const int32_t vrndacc1x2 = (int32_t) lrintf(vfpacc1x2);
+ const int32_t vrndacc1x3 = (int32_t) lrintf(vfpacc1x3);
+ const int32_t vrndacc2x0 = (int32_t) lrintf(vfpacc2x0);
+ const int32_t vrndacc2x1 = (int32_t) lrintf(vfpacc2x1);
+ const int32_t vrndacc2x2 = (int32_t) lrintf(vfpacc2x2);
+ const int32_t vrndacc2x3 = (int32_t) lrintf(vfpacc2x3);
+ const int32_t vrndacc3x0 = (int32_t) lrintf(vfpacc3x0);
+ const int32_t vrndacc3x1 = (int32_t) lrintf(vfpacc3x1);
+ const int32_t vrndacc3x2 = (int32_t) lrintf(vfpacc3x2);
+ const int32_t vrndacc3x3 = (int32_t) lrintf(vfpacc3x3);
- const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_magic.magic_bias_less_output_zero_point;
- int32_t vout0x0 = (int32_t) fp32_to_bits(vfpacc0x0) - vmagic_bias_less_output_zero_point;
- int32_t vout0x1 = (int32_t) fp32_to_bits(vfpacc0x1) - vmagic_bias_less_output_zero_point;
- int32_t vout0x2 = (int32_t) fp32_to_bits(vfpacc0x2) - vmagic_bias_less_output_zero_point;
- int32_t vout0x3 = (int32_t) fp32_to_bits(vfpacc0x3) - vmagic_bias_less_output_zero_point;
- int32_t vout1x0 = (int32_t) fp32_to_bits(vfpacc1x0) - vmagic_bias_less_output_zero_point;
- int32_t vout1x1 = (int32_t) fp32_to_bits(vfpacc1x1) - vmagic_bias_less_output_zero_point;
- int32_t vout1x2 = (int32_t) fp32_to_bits(vfpacc1x2) - vmagic_bias_less_output_zero_point;
- int32_t vout1x3 = (int32_t) fp32_to_bits(vfpacc1x3) - vmagic_bias_less_output_zero_point;
- int32_t vout2x0 = (int32_t) fp32_to_bits(vfpacc2x0) - vmagic_bias_less_output_zero_point;
- int32_t vout2x1 = (int32_t) fp32_to_bits(vfpacc2x1) - vmagic_bias_less_output_zero_point;
- int32_t vout2x2 = (int32_t) fp32_to_bits(vfpacc2x2) - vmagic_bias_less_output_zero_point;
- int32_t vout2x3 = (int32_t) fp32_to_bits(vfpacc2x3) - vmagic_bias_less_output_zero_point;
- int32_t vout3x0 = (int32_t) fp32_to_bits(vfpacc3x0) - vmagic_bias_less_output_zero_point;
- int32_t vout3x1 = (int32_t) fp32_to_bits(vfpacc3x1) - vmagic_bias_less_output_zero_point;
- int32_t vout3x2 = (int32_t) fp32_to_bits(vfpacc3x2) - vmagic_bias_less_output_zero_point;
- int32_t vout3x3 = (int32_t) fp32_to_bits(vfpacc3x3) - vmagic_bias_less_output_zero_point;
+ const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
+ int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
+ int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
+ int32_t vout0x2 = vrndacc0x2 + voutput_zero_point;
+ int32_t vout0x3 = vrndacc0x3 + voutput_zero_point;
+ int32_t vout1x0 = vrndacc1x0 + voutput_zero_point;
+ int32_t vout1x1 = vrndacc1x1 + voutput_zero_point;
+ int32_t vout1x2 = vrndacc1x2 + voutput_zero_point;
+ int32_t vout1x3 = vrndacc1x3 + voutput_zero_point;
+ int32_t vout2x0 = vrndacc2x0 + voutput_zero_point;
+ int32_t vout2x1 = vrndacc2x1 + voutput_zero_point;
+ int32_t vout2x2 = vrndacc2x2 + voutput_zero_point;
+ int32_t vout2x3 = vrndacc2x3 + voutput_zero_point;
+ int32_t vout3x0 = vrndacc3x0 + voutput_zero_point;
+ int32_t vout3x1 = vrndacc3x1 + voutput_zero_point;
+ int32_t vout3x2 = vrndacc3x2 + voutput_zero_point;
+ int32_t vout3x3 = vrndacc3x3 + voutput_zero_point;
if XNN_LIKELY(nc >= 4) {
c3[0] = (uint8_t) vout3x0;
diff --git a/src/qu8-requantization/fp32-scalar-magic.c b/src/qu8-requantization/fp32-scalar-fmagic.c
similarity index 89%
rename from src/qu8-requantization/fp32-scalar-magic.c
rename to src/qu8-requantization/fp32-scalar-fmagic.c
index a9add39..d2deb58 100644
--- a/src/qu8-requantization/fp32-scalar-magic.c
+++ b/src/qu8-requantization/fp32-scalar-fmagic.c
@@ -17,7 +17,7 @@
#include <xnnpack/requantization-stubs.h>
-void xnn_qu8_requantize_fp32__scalar_magic(
+void xnn_qu8_requantize_fp32__scalar_fmagic(
size_t n,
const int32_t* input,
float scale,
@@ -30,8 +30,8 @@
assert(scale < 1.0f);
assert(scale >= 0x1.0p-32f);
- const float fmin = (float) ((int32_t)(uint32_t) qmin - (int32_t)(uint32_t) zero_point);
- const float fmax = (float) ((int32_t)(uint32_t) qmax - (int32_t)(uint32_t) zero_point);
+ const float fmin = (float) ((int32_t) (uint32_t) qmin - (int32_t) (uint32_t) zero_point);
+ const float fmax = (float) ((int32_t) (uint32_t) qmax - (int32_t) (uint32_t) zero_point);
const float fmagic = 12582912.0f;
const int32_t imagic = INT32_C(0x4B400000) - (int32_t)(uint32_t) zero_point;
for (; n != 0; n -= 4) {
diff --git a/src/qu8-requantization/fp32-scalar-lrintf.c b/src/qu8-requantization/fp32-scalar-lrintf.c
index 3fdccfb..6162cfb 100644
--- a/src/qu8-requantization/fp32-scalar-lrintf.c
+++ b/src/qu8-requantization/fp32-scalar-lrintf.c
@@ -13,6 +13,7 @@
#include <fp16/bitcasts.h>
+#include <xnnpack/math.h>
#include <xnnpack/requantization-stubs.h>
@@ -29,8 +30,8 @@
assert(scale < 1.0f);
assert(scale >= 0x1.0p-32f);
- const long lmin = (long) ((int32_t)(uint32_t) qmin - (int32_t)(uint32_t) zero_point);
- const long lmax = (long) ((int32_t)(uint32_t) qmax - (int32_t)(uint32_t) zero_point);
+ const float fmin = (float) ((int32_t) (uint32_t) qmin - (int32_t) (uint32_t) zero_point);
+ const float fmax = (float) ((int32_t) (uint32_t) qmax - (int32_t) (uint32_t) zero_point);
for (; n != 0; n -= 4) {
const int32_t x = input[0];
const int32_t y = input[1];
@@ -43,20 +44,20 @@
const float z_scaled = (float) z * scale;
const float w_scaled = (float) w * scale;
- const long x_rounded = lrintf(x_scaled);
- const long y_rounded = lrintf(y_scaled);
- const long z_rounded = lrintf(z_scaled);
- const long w_rounded = lrintf(w_scaled);
+ const float x_clamped = math_min_f32(math_max_f32(x_scaled, fmin), fmax);
+ const float y_clamped = math_min_f32(math_max_f32(y_scaled, fmin), fmax);
+ const float z_clamped = math_min_f32(math_max_f32(z_scaled, fmin), fmax);
+ const float w_clamped = math_min_f32(math_max_f32(w_scaled, fmin), fmax);
- const int32_t x_clamped = (int32_t)(x_rounded < lmin ? lmin : x_rounded > lmax ? lmax : x_rounded);
- const int32_t y_clamped = (int32_t)(y_rounded < lmin ? lmin : y_rounded > lmax ? lmax : y_rounded);
- const int32_t z_clamped = (int32_t)(z_rounded < lmin ? lmin : z_rounded > lmax ? lmax : z_rounded);
- const int32_t w_clamped = (int32_t)(w_rounded < lmin ? lmin : w_rounded > lmax ? lmax : w_rounded);
+ const int32_t x_rounded = (int32_t) lrintf(x_clamped);
+ const int32_t y_rounded = (int32_t) lrintf(y_clamped);
+ const int32_t z_rounded = (int32_t) lrintf(z_clamped);
+ const int32_t w_rounded = (int32_t) lrintf(w_clamped);
- const int32_t x_biased = x_clamped + (int32_t)(uint32_t) zero_point;
- const int32_t y_biased = y_clamped + (int32_t)(uint32_t) zero_point;
- const int32_t z_biased = z_clamped + (int32_t)(uint32_t) zero_point;
- const int32_t w_biased = w_clamped + (int32_t)(uint32_t) zero_point;
+ const int32_t x_biased = x_rounded + (int32_t) (uint32_t) zero_point;
+ const int32_t y_biased = y_rounded + (int32_t) (uint32_t) zero_point;
+ const int32_t z_biased = z_rounded + (int32_t) (uint32_t) zero_point;
+ const int32_t w_biased = w_rounded + (int32_t) (uint32_t) zero_point;
output[0] = (uint8_t) x_biased;
output[1] = (uint8_t) y_biased;
diff --git a/src/xnnpack/dwconv.h b/src/xnnpack/dwconv.h
index 4579e2c..f63b709 100644
--- a/src/xnnpack/dwconv.h
+++ b/src/xnnpack/dwconv.h
@@ -409,13 +409,13 @@
DECLARE_QU8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_dwconv_minmax_fp32_ukernel_up16x9__wasmsimd_mul16)
DECLARE_QU8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_dwconv_minmax_fp32_ukernel_up24x9__wasmsimd_mul16)
-DECLARE_QU8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrint)
-DECLARE_QU8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint)
-DECLARE_QU8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint)
+DECLARE_QU8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrintf)
+DECLARE_QU8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrintf)
+DECLARE_QU8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrintf)
-DECLARE_QU8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_magic)
-DECLARE_QU8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_magic)
-DECLARE_QU8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_magic)
+DECLARE_QU8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_fmagic)
+DECLARE_QU8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic)
+DECLARE_QU8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_fmagic)
DECLARE_QU8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_dwconv_minmax_fp32_ukernel_up8x25__neon_mul16)
DECLARE_QU8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_dwconv_minmax_fp32_ukernel_up16x25__neon_mul16)
@@ -457,13 +457,13 @@
DECLARE_QU8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_dwconv_minmax_fp32_ukernel_up16x25__wasmsimd_mul16)
DECLARE_QU8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_dwconv_minmax_fp32_ukernel_up24x25__wasmsimd_mul16)
-DECLARE_QU8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrint)
-DECLARE_QU8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrint)
-DECLARE_QU8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrint)
+DECLARE_QU8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrintf)
+DECLARE_QU8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrintf)
+DECLARE_QU8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrintf)
-DECLARE_QU8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_magic)
-DECLARE_QU8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_magic)
-DECLARE_QU8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_magic)
+DECLARE_QU8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_fmagic)
+DECLARE_QU8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_fmagic)
+DECLARE_QU8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_fmagic)
#define DECLARE_QS8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(fn_name) \
@@ -571,13 +571,13 @@
DECLARE_QS8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_dwconv_minmax_fp32_ukernel_up16x9__wasmsimd_mul16_add16)
DECLARE_QS8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_dwconv_minmax_fp32_ukernel_up24x9__wasmsimd_mul16_add16)
-DECLARE_QS8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrint)
-DECLARE_QS8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint)
-DECLARE_QS8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint)
+DECLARE_QS8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrintf)
+DECLARE_QS8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrintf)
+DECLARE_QS8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrintf)
-DECLARE_QS8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_magic)
-DECLARE_QS8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_magic)
-DECLARE_QS8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_magic)
+DECLARE_QS8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_fmagic)
+DECLARE_QS8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic)
+DECLARE_QS8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_fmagic)
DECLARE_QS8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_dwconv_minmax_fp32_ukernel_up8x25__neon_mul16)
DECLARE_QS8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_dwconv_minmax_fp32_ukernel_up16x25__neon_mul16)
@@ -655,13 +655,13 @@
DECLARE_QS8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_dwconv_minmax_fp32_ukernel_up16x25__wasmsimd_mul16_add16)
DECLARE_QS8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_dwconv_minmax_fp32_ukernel_up24x25__wasmsimd_mul16_add16)
-DECLARE_QS8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrint)
-DECLARE_QS8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrint)
-DECLARE_QS8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrint)
+DECLARE_QS8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrintf)
+DECLARE_QS8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrintf)
+DECLARE_QS8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrintf)
-DECLARE_QS8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x25__scalar_magic)
-DECLARE_QS8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_magic)
-DECLARE_QS8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_magic)
+DECLARE_QS8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x25__scalar_fmagic)
+DECLARE_QS8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_fmagic)
+DECLARE_QS8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_fmagic)
#define DECLARE_QC8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(fn_name) \
@@ -790,13 +790,13 @@
DECLARE_QC8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qc8_dwconv_minmax_fp32_ukernel_up16x9__wasmsimd_mul16_add16)
DECLARE_QC8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qc8_dwconv_minmax_fp32_ukernel_up24x9__wasmsimd_mul16_add16)
-DECLARE_QC8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrint)
-DECLARE_QC8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint)
-DECLARE_QC8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint)
+DECLARE_QC8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrintf)
+DECLARE_QC8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrintf)
+DECLARE_QC8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrintf)
-DECLARE_QC8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x9__scalar_magic)
-DECLARE_QC8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_magic)
-DECLARE_QC8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_magic)
+DECLARE_QC8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x9__scalar_fmagic)
+DECLARE_QC8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic)
+DECLARE_QC8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_fmagic)
DECLARE_QC8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qc8_dwconv_minmax_fp32_ukernel_up8x25__sse2_mul16)
DECLARE_QC8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qc8_dwconv_minmax_fp32_ukernel_up16x25__sse2_mul16)
@@ -859,13 +859,13 @@
DECLARE_QC8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qc8_dwconv_minmax_fp32_ukernel_up16x25__wasmsimd_mul16_add16)
DECLARE_QC8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qc8_dwconv_minmax_fp32_ukernel_up24x25__wasmsimd_mul16_add16)
-DECLARE_QC8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrint)
-DECLARE_QC8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrint)
-DECLARE_QC8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrint)
+DECLARE_QC8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrintf)
+DECLARE_QC8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrintf)
+DECLARE_QC8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrintf)
-DECLARE_QC8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x25__scalar_magic)
-DECLARE_QC8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_magic)
-DECLARE_QC8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_magic)
+DECLARE_QC8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x25__scalar_fmagic)
+DECLARE_QC8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_fmagic)
+DECLARE_QC8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_fmagic)
#define DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(fn_name) \
diff --git a/src/xnnpack/gemm.h b/src/xnnpack/gemm.h
index f4d687d..06f859f 100644
--- a/src/xnnpack/gemm.h
+++ b/src/xnnpack/gemm.h
@@ -646,25 +646,25 @@
DECLARE_QU8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_gemm_minmax_fp32_ukernel_2x4c8__wasmsimd_mul32_ld128)
DECLARE_QU8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_gemm_minmax_fp32_ukernel_3x4c8__wasmsimd_mul32_ld128)
-DECLARE_QU8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint)
-DECLARE_QU8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint)
-DECLARE_QU8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint)
-DECLARE_QU8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint)
+DECLARE_QU8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf)
+DECLARE_QU8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf)
+DECLARE_QU8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf)
+DECLARE_QU8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf)
-DECLARE_QU8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_magic)
-DECLARE_QU8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_magic)
-DECLARE_QU8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_magic)
-DECLARE_QU8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_magic)
+DECLARE_QU8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic)
+DECLARE_QU8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic)
+DECLARE_QU8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic)
+DECLARE_QU8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic)
-DECLARE_QU8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint)
-DECLARE_QU8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint)
-DECLARE_QU8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint)
-DECLARE_QU8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint)
+DECLARE_QU8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf)
+DECLARE_QU8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf)
+DECLARE_QU8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf)
+DECLARE_QU8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf)
-DECLARE_QU8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_magic)
-DECLARE_QU8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_magic)
-DECLARE_QU8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_magic)
-DECLARE_QU8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_magic)
+DECLARE_QU8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic)
+DECLARE_QU8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic)
+DECLARE_QU8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic)
+DECLARE_QU8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic)
#define DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(fn_name) \
@@ -1225,30 +1225,30 @@
DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_3x2__scalar)
DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_4x2__scalar)
-DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint)
-DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint)
-DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint)
-DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf)
-DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_magic)
-DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_magic)
-DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_magic)
-DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_magic)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic)
DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_1x4__scalar)
DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_2x4__scalar)
DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_3x4__scalar)
DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_4x4__scalar)
-DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint)
-DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint)
-DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint)
-DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf)
-DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_magic)
-DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_magic)
-DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_magic)
-DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_magic)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic)
#define DECLARE_QC8_GEMM_MINMAX_UKERNEL_FUNCTION(fn_name) \
@@ -1492,25 +1492,25 @@
DECLARE_QC8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_gemm_minmax_fp32_ukernel_2x4c8__wasmsimd_mul16_ld128)
DECLARE_QC8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_gemm_minmax_fp32_ukernel_3x4c8__wasmsimd_mul16_ld128)
-DECLARE_QC8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint)
-DECLARE_QC8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint)
-DECLARE_QC8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint)
-DECLARE_QC8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint)
+DECLARE_QC8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf)
+DECLARE_QC8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf)
+DECLARE_QC8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf)
+DECLARE_QC8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf)
-DECLARE_QC8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_magic)
-DECLARE_QC8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_magic)
-DECLARE_QC8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_magic)
-DECLARE_QC8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_magic)
+DECLARE_QC8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic)
+DECLARE_QC8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic)
+DECLARE_QC8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic)
+DECLARE_QC8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic)
-DECLARE_QC8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint)
-DECLARE_QC8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint)
-DECLARE_QC8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint)
-DECLARE_QC8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint)
+DECLARE_QC8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf)
+DECLARE_QC8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf)
+DECLARE_QC8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf)
+DECLARE_QC8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf)
-DECLARE_QC8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_magic)
-DECLARE_QC8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_magic)
-DECLARE_QC8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_magic)
-DECLARE_QC8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_magic)
+DECLARE_QC8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic)
+DECLARE_QC8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic)
+DECLARE_QC8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic)
+DECLARE_QC8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic)
enum xnn_status xnn_generate_f32_gemm_ukernel_4x8__aarch32_neon_cortex_a55(struct xnn_code_buffer* code);
diff --git a/src/xnnpack/igemm.h b/src/xnnpack/igemm.h
index 0a9d89d..a3f8d52 100644
--- a/src/xnnpack/igemm.h
+++ b/src/xnnpack/igemm.h
@@ -469,25 +469,25 @@
DECLARE_QU8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_igemm_minmax_fp32_ukernel_2x4c8__wasmsimd_mul32_ld128)
DECLARE_QU8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_igemm_minmax_fp32_ukernel_3x4c8__wasmsimd_mul32_ld128)
-DECLARE_QU8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint)
-DECLARE_QU8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint)
-DECLARE_QU8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint)
-DECLARE_QU8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint)
+DECLARE_QU8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf)
+DECLARE_QU8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf)
+DECLARE_QU8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf)
+DECLARE_QU8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf)
-DECLARE_QU8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_magic)
-DECLARE_QU8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_magic)
-DECLARE_QU8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_magic)
-DECLARE_QU8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_magic)
+DECLARE_QU8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic)
+DECLARE_QU8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic)
+DECLARE_QU8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic)
+DECLARE_QU8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic)
-DECLARE_QU8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint)
-DECLARE_QU8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint)
-DECLARE_QU8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint)
-DECLARE_QU8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint)
+DECLARE_QU8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf)
+DECLARE_QU8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf)
+DECLARE_QU8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf)
+DECLARE_QU8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf)
-DECLARE_QU8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_magic)
-DECLARE_QU8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_magic)
-DECLARE_QU8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_magic)
-DECLARE_QU8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_magic)
+DECLARE_QU8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic)
+DECLARE_QU8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic)
+DECLARE_QU8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic)
+DECLARE_QU8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic)
#define DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(fn_name) \
@@ -983,30 +983,30 @@
DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_3x2__scalar)
DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_4x2__scalar)
-DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint)
-DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint)
-DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint)
-DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf)
-DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_magic)
-DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_magic)
-DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_magic)
-DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_magic)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic)
DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_1x4__scalar)
DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_2x4__scalar)
DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_3x4__scalar)
DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_4x4__scalar)
-DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint)
-DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint)
-DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint)
-DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf)
-DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_magic)
-DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_magic)
-DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_magic)
-DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_magic)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic)
#define DECLARE_QC8_IGEMM_MINMAX_UKERNEL_FUNCTION(fn_name) \
@@ -1244,25 +1244,25 @@
DECLARE_QC8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_igemm_minmax_fp32_ukernel_2x4c8__wasmsimd_mul16_ld128)
DECLARE_QC8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_igemm_minmax_fp32_ukernel_3x4c8__wasmsimd_mul16_ld128)
-DECLARE_QC8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint)
-DECLARE_QC8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint)
-DECLARE_QC8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint)
-DECLARE_QC8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint)
+DECLARE_QC8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf)
+DECLARE_QC8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf)
+DECLARE_QC8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf)
+DECLARE_QC8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf)
-DECLARE_QC8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_magic)
-DECLARE_QC8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_magic)
-DECLARE_QC8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_magic)
-DECLARE_QC8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_magic)
+DECLARE_QC8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic)
+DECLARE_QC8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic)
+DECLARE_QC8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic)
+DECLARE_QC8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic)
-DECLARE_QC8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint)
-DECLARE_QC8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint)
-DECLARE_QC8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint)
-DECLARE_QC8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint)
+DECLARE_QC8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf)
+DECLARE_QC8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf)
+DECLARE_QC8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf)
+DECLARE_QC8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf)
-DECLARE_QC8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_magic)
-DECLARE_QC8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_magic)
-DECLARE_QC8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_magic)
-DECLARE_QC8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_magic)
+DECLARE_QC8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic)
+DECLARE_QC8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic)
+DECLARE_QC8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic)
+DECLARE_QC8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic)
#ifdef __cplusplus
diff --git a/src/xnnpack/params-init.h b/src/xnnpack/params-init.h
index 54bb35f..7144760 100644
--- a/src/xnnpack/params-init.h
+++ b/src/xnnpack/params-init.h
@@ -26,8 +26,8 @@
uint8_t output_min, \
uint8_t output_max);
-DECLARE_INIT_QU8_CONV_MINMAX_PARAMS_FUNCTION(xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params)
-DECLARE_INIT_QU8_CONV_MINMAX_PARAMS_FUNCTION(xnn_init_qu8_conv_minmax_fp32_scalar_magic_params)
+DECLARE_INIT_QU8_CONV_MINMAX_PARAMS_FUNCTION(xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params)
+DECLARE_INIT_QU8_CONV_MINMAX_PARAMS_FUNCTION(xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params)
#if XNN_ARCH_ARM || XNN_ARCH_ARM64
DECLARE_INIT_QU8_CONV_MINMAX_PARAMS_FUNCTION(xnn_init_qu8_conv_minmax_fp32_neon_params)
@@ -54,9 +54,8 @@
int8_t output_min, \
int8_t output_max);
-DECLARE_INIT_QS8_CONV_MINMAX_PARAMS_FUNCTION(xnn_init_qs8_conv_minmax_rndnu_scalar_params)
-DECLARE_INIT_QS8_CONV_MINMAX_PARAMS_FUNCTION(xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params)
-DECLARE_INIT_QS8_CONV_MINMAX_PARAMS_FUNCTION(xnn_init_qs8_conv_minmax_fp32_scalar_magic_params)
+DECLARE_INIT_QS8_CONV_MINMAX_PARAMS_FUNCTION(xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params)
+DECLARE_INIT_QS8_CONV_MINMAX_PARAMS_FUNCTION(xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params)
#if XNN_ARCH_ARM || XNN_ARCH_ARM64
DECLARE_INIT_QS8_CONV_MINMAX_PARAMS_FUNCTION(xnn_init_qs8_conv_minmax_fp32_neon_params)
@@ -91,8 +90,8 @@
int8_t output_min, \
int8_t output_max);
-DECLARE_INIT_QS8_MINMAX_PARAMS_FUNCTION(xnn_init_qs8_minmax_scalar_lrint_params)
-DECLARE_INIT_QS8_MINMAX_PARAMS_FUNCTION(xnn_init_qs8_minmax_scalar_magic_params)
+DECLARE_INIT_QS8_MINMAX_PARAMS_FUNCTION(xnn_init_qs8_minmax_scalar_lrintf_params)
+DECLARE_INIT_QS8_MINMAX_PARAMS_FUNCTION(xnn_init_qs8_minmax_scalar_fmagic_params)
#if XNN_ARCH_ARM || XNN_ARCH_ARM64
DECLARE_INIT_QS8_MINMAX_PARAMS_FUNCTION(xnn_init_qs8_minmax_neon_params)
diff --git a/src/xnnpack/params.h b/src/xnnpack/params.h
index 2e5abc9..3c826b0 100644
--- a/src/xnnpack/params.h
+++ b/src/xnnpack/params.h
@@ -827,10 +827,10 @@
struct {
int32_t kernel_zero_point;
float scale;
- long output_min_less_zero_point;
- long output_max_less_zero_point;
+ float output_min_less_zero_point;
+ float output_max_less_zero_point;
int32_t output_zero_point;
- } fp32_scalar_lrint;
+ } fp32_scalar_lrintf;
struct {
int32_t kernel_zero_point;
float scale;
@@ -838,7 +838,7 @@
float output_max_less_zero_point;
float magic_bias;
int32_t magic_bias_less_output_zero_point;
- } fp32_scalar_magic;
+ } fp32_scalar_fmagic;
#if XNN_ARCH_ARM || XNN_ARCH_ARM64
struct {
uint8_t kernel_zero_point[4];
@@ -902,16 +902,16 @@
union xnn_qs8_minmax_params {
struct {
- long output_min_less_zero_point;
- long output_max_less_zero_point;
+ float output_min_less_zero_point;
+ float output_max_less_zero_point;
int32_t output_zero_point;
- } scalar_lrint;
+ } scalar_lrintf;
struct {
float output_min_less_zero_point;
float output_max_less_zero_point;
float magic_bias;
int32_t magic_bias_less_output_zero_point;
- } scalar_magic;
+ } scalar_fmagic;
#if XNN_ARCH_ARM || XNN_ARCH_ARM64
struct {
float magic_bias;
@@ -959,26 +959,18 @@
union xnn_qs8_conv_minmax_params {
struct {
- int32_t multiplier;
- uint32_t shift;
- int64_t rounding;
- int32_t output_min_less_zero_point;
- int32_t output_max_less_zero_point;
- int32_t output_zero_point;
- } rndnu_scalar;
- struct {
float scale;
- long output_min_less_zero_point;
- long output_max_less_zero_point;
+ float output_min_less_zero_point;
+ float output_max_less_zero_point;
int32_t output_zero_point;
- } fp32_scalar_lrint;
+ } fp32_scalar_lrintf;
struct {
float scale;
float output_min_less_zero_point;
float output_max_less_zero_point;
float magic_bias;
int32_t magic_bias_less_output_zero_point;
- } fp32_scalar_magic;
+ } fp32_scalar_fmagic;
#if XNN_ARCH_ARM || XNN_ARCH_ARM64
struct {
float scale;
diff --git a/src/xnnpack/requantization-stubs.h b/src/xnnpack/requantization-stubs.h
index 426746d..2f6c3bd 100644
--- a/src/xnnpack/requantization-stubs.h
+++ b/src/xnnpack/requantization-stubs.h
@@ -41,7 +41,7 @@
DECLARE_QU8_REQUANTIZATION_FUNCTION(xnn_qu8_requantize_fp32__sse2)
DECLARE_QU8_REQUANTIZATION_FUNCTION(xnn_qu8_requantize_fp32__wasmsimd)
DECLARE_QU8_REQUANTIZATION_FUNCTION(xnn_qu8_requantize_fp32__scalar_lrintf)
-DECLARE_QU8_REQUANTIZATION_FUNCTION(xnn_qu8_requantize_fp32__scalar_magic)
+DECLARE_QU8_REQUANTIZATION_FUNCTION(xnn_qu8_requantize_fp32__scalar_fmagic)
DECLARE_QU8_REQUANTIZATION_FUNCTION(xnn_qu8_requantize_gemmlowp__neon)
DECLARE_QU8_REQUANTIZATION_FUNCTION(xnn_qu8_requantize_gemmlowp__sse2)
@@ -83,7 +83,7 @@
DECLARE_QS8_REQUANTIZATION_FUNCTION(xnn_qs8_requantize_fp32__sse4)
DECLARE_QS8_REQUANTIZATION_FUNCTION(xnn_qs8_requantize_fp32__wasmsimd)
DECLARE_QS8_REQUANTIZATION_FUNCTION(xnn_qs8_requantize_fp32__scalar_lrintf)
-DECLARE_QS8_REQUANTIZATION_FUNCTION(xnn_qs8_requantize_fp32__scalar_magic)
+DECLARE_QS8_REQUANTIZATION_FUNCTION(xnn_qs8_requantize_fp32__scalar_fmagic)
DECLARE_QS8_REQUANTIZATION_FUNCTION(xnn_qs8_requantize_gemmlowp__neon)
DECLARE_QS8_REQUANTIZATION_FUNCTION(xnn_qs8_requantize_gemmlowp__sse2)
diff --git a/test/qc8-dwconv-minmax-fp32.cc b/test/qc8-dwconv-minmax-fp32.cc
index ef7d442..1103df3 100644
--- a/test/qc8-dwconv-minmax-fp32.cc
+++ b/test/qc8-dwconv-minmax-fp32.cc
@@ -11834,58 +11834,58 @@
#endif // XNN_ARCH_WASMSIMD || XNN_ARCH_WASMRELAXEDSIMD
-TEST(QC8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINT, c_eq_1) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINTF, c_eq_1) {
DWConvMicrokernelTester()
.cr(1)
.kr(9)
.channels(1)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINT, c_gt_1) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINTF, c_gt_1) {
for (uint32_t channels = 2; channels < 10; channels++) {
DWConvMicrokernelTester()
.cr(1)
.kr(9)
.channels(channels)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINT, c_gt_1_with_qmin) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINTF, c_gt_1_with_qmin) {
for (uint32_t channels = 2; channels < 10; channels++) {
DWConvMicrokernelTester()
.cr(1)
.kr(9)
.channels(channels)
.qmin(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINT, c_gt_1_with_qmax) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINTF, c_gt_1_with_qmax) {
for (uint32_t channels = 2; channels < 10; channels++) {
DWConvMicrokernelTester()
.cr(1)
.kr(9)
.channels(channels)
.qmax(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINT, multipixel) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINTF, multipixel) {
for (size_t channels = 1; channels <= 5; channels += 1) {
DWConvMicrokernelTester()
.cr(1)
.kr(9)
.channels(channels)
.width(3)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINT, multipixel_with_step) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINTF, multipixel_with_step) {
for (size_t channels = 1; channels <= 5; channels += 1) {
for (size_t step = 2; step <= 9; step++) {
DWConvMicrokernelTester()
@@ -11894,12 +11894,12 @@
.channels(channels)
.width(3)
.step(step)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINT, multipixel_with_output_stride) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINTF, multipixel_with_output_stride) {
for (size_t channels = 1; channels <= 5; channels += 1) {
DWConvMicrokernelTester()
.cr(1)
@@ -11907,11 +11907,11 @@
.channels(1)
.width(5)
.output_stride(7)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINT, multipixel_with_qmin) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINTF, multipixel_with_qmin) {
for (size_t channels = 1; channels <= 5; channels += 1) {
DWConvMicrokernelTester()
.cr(1)
@@ -11919,11 +11919,11 @@
.channels(channels)
.width(3)
.qmin(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINT, multipixel_with_qmax) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINTF, multipixel_with_qmax) {
for (size_t channels = 1; channels <= 5; channels += 1) {
DWConvMicrokernelTester()
.cr(1)
@@ -11931,22 +11931,22 @@
.channels(channels)
.width(3)
.qmax(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINT, input_offset) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINTF, input_offset) {
for (uint32_t channels = 2; channels < 16; channels += 3) {
DWConvMicrokernelTester()
.cr(1)
.kr(9)
.channels(channels)
.input_offset(48)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINT, zero) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINTF, zero) {
for (uint32_t mz = 0; mz < 9; mz++) {
for (uint32_t channels = 2; channels < 16; channels += 3) {
DWConvMicrokernelTester()
@@ -11955,105 +11955,105 @@
.channels(channels)
.input_offset(48)
.zero_index(mz)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINT, c_eq_2) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINTF, c_eq_2) {
DWConvMicrokernelTester()
.cr(2)
.kr(9)
.channels(2)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINT, c_div_2) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINTF, c_div_2) {
for (uint32_t channels = 4; channels < 32; channels += 6) {
DWConvMicrokernelTester()
.cr(2)
.kr(9)
.channels(channels)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINT, c_div_2_with_qmin) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINTF, c_div_2_with_qmin) {
for (uint32_t channels = 4; channels < 32; channels += 6) {
DWConvMicrokernelTester()
.cr(2)
.kr(9)
.channels(channels)
.qmin(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINT, c_div_2_with_qmax) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINTF, c_div_2_with_qmax) {
for (uint32_t channels = 4; channels < 32; channels += 6) {
DWConvMicrokernelTester()
.cr(2)
.kr(9)
.channels(channels)
.qmax(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINT, c_lt_2) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINTF, c_lt_2) {
for (uint32_t channels = 1; channels < 2; channels++) {
DWConvMicrokernelTester()
.cr(2)
.kr(9)
.channels(channels)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINT, c_gt_2) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINTF, c_gt_2) {
for (uint32_t channels = 3; channels < 4; channels++) {
DWConvMicrokernelTester()
.cr(2)
.kr(9)
.channels(channels)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINT, c_gt_2_with_qmin) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINTF, c_gt_2_with_qmin) {
for (uint32_t channels = 3; channels < 4; channels++) {
DWConvMicrokernelTester()
.cr(2)
.kr(9)
.channels(channels)
.qmin(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINT, c_gt_2_with_qmax) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINTF, c_gt_2_with_qmax) {
for (uint32_t channels = 3; channels < 4; channels++) {
DWConvMicrokernelTester()
.cr(2)
.kr(9)
.channels(channels)
.qmax(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINT, multipixel) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINTF, multipixel) {
for (size_t channels = 1; channels <= 10; channels += 1) {
DWConvMicrokernelTester()
.cr(2)
.kr(9)
.channels(channels)
.width(3)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINT, multipixel_with_step) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINTF, multipixel_with_step) {
for (size_t channels = 1; channels <= 10; channels += 1) {
for (size_t step = 2; step <= 9; step++) {
DWConvMicrokernelTester()
@@ -12062,12 +12062,12 @@
.channels(channels)
.width(3)
.step(step)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINT, multipixel_with_output_stride) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINTF, multipixel_with_output_stride) {
for (size_t channels = 1; channels <= 10; channels += 1) {
DWConvMicrokernelTester()
.cr(2)
@@ -12075,11 +12075,11 @@
.channels(2)
.width(5)
.output_stride(13)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINT, multipixel_with_qmin) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINTF, multipixel_with_qmin) {
for (size_t channels = 1; channels <= 10; channels += 1) {
DWConvMicrokernelTester()
.cr(2)
@@ -12087,11 +12087,11 @@
.channels(channels)
.width(3)
.qmin(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINT, multipixel_with_qmax) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINTF, multipixel_with_qmax) {
for (size_t channels = 1; channels <= 10; channels += 1) {
DWConvMicrokernelTester()
.cr(2)
@@ -12099,22 +12099,22 @@
.channels(channels)
.width(3)
.qmax(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINT, input_offset) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINTF, input_offset) {
for (uint32_t channels = 4; channels < 32; channels += 6) {
DWConvMicrokernelTester()
.cr(2)
.kr(9)
.channels(channels)
.input_offset(80)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINT, zero) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINTF, zero) {
for (uint32_t mz = 0; mz < 9; mz++) {
for (uint32_t channels = 4; channels < 32; channels += 6) {
DWConvMicrokernelTester()
@@ -12123,105 +12123,105 @@
.channels(channels)
.input_offset(80)
.zero_index(mz)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINT, c_eq_4) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINTF, c_eq_4) {
DWConvMicrokernelTester()
.cr(4)
.kr(9)
.channels(4)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINT, c_div_4) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINTF, c_div_4) {
for (uint32_t channels = 8; channels < 64; channels += 12) {
DWConvMicrokernelTester()
.cr(4)
.kr(9)
.channels(channels)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINT, c_div_4_with_qmin) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINTF, c_div_4_with_qmin) {
for (uint32_t channels = 8; channels < 64; channels += 12) {
DWConvMicrokernelTester()
.cr(4)
.kr(9)
.channels(channels)
.qmin(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINT, c_div_4_with_qmax) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINTF, c_div_4_with_qmax) {
for (uint32_t channels = 8; channels < 64; channels += 12) {
DWConvMicrokernelTester()
.cr(4)
.kr(9)
.channels(channels)
.qmax(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINT, c_lt_4) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINTF, c_lt_4) {
for (uint32_t channels = 1; channels < 4; channels++) {
DWConvMicrokernelTester()
.cr(4)
.kr(9)
.channels(channels)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINT, c_gt_4) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINTF, c_gt_4) {
for (uint32_t channels = 5; channels < 8; channels++) {
DWConvMicrokernelTester()
.cr(4)
.kr(9)
.channels(channels)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINT, c_gt_4_with_qmin) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINTF, c_gt_4_with_qmin) {
for (uint32_t channels = 5; channels < 8; channels++) {
DWConvMicrokernelTester()
.cr(4)
.kr(9)
.channels(channels)
.qmin(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINT, c_gt_4_with_qmax) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINTF, c_gt_4_with_qmax) {
for (uint32_t channels = 5; channels < 8; channels++) {
DWConvMicrokernelTester()
.cr(4)
.kr(9)
.channels(channels)
.qmax(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINT, multipixel) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINTF, multipixel) {
for (size_t channels = 1; channels <= 20; channels += 3) {
DWConvMicrokernelTester()
.cr(4)
.kr(9)
.channels(channels)
.width(3)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINT, multipixel_with_step) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINTF, multipixel_with_step) {
for (size_t channels = 1; channels <= 20; channels += 3) {
for (size_t step = 2; step <= 9; step++) {
DWConvMicrokernelTester()
@@ -12230,12 +12230,12 @@
.channels(channels)
.width(3)
.step(step)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINT, multipixel_with_output_stride) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINTF, multipixel_with_output_stride) {
for (size_t channels = 1; channels <= 20; channels += 3) {
DWConvMicrokernelTester()
.cr(4)
@@ -12243,11 +12243,11 @@
.channels(4)
.width(5)
.output_stride(23)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINT, multipixel_with_qmin) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINTF, multipixel_with_qmin) {
for (size_t channels = 1; channels <= 20; channels += 3) {
DWConvMicrokernelTester()
.cr(4)
@@ -12255,11 +12255,11 @@
.channels(channels)
.width(3)
.qmin(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINT, multipixel_with_qmax) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINTF, multipixel_with_qmax) {
for (size_t channels = 1; channels <= 20; channels += 3) {
DWConvMicrokernelTester()
.cr(4)
@@ -12267,22 +12267,22 @@
.channels(channels)
.width(3)
.qmax(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINT, input_offset) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINTF, input_offset) {
for (uint32_t channels = 8; channels < 64; channels += 12) {
DWConvMicrokernelTester()
.cr(4)
.kr(9)
.channels(channels)
.input_offset(112)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINT, zero) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINTF, zero) {
for (uint32_t mz = 0; mz < 9; mz++) {
for (uint32_t channels = 8; channels < 64; channels += 12) {
DWConvMicrokernelTester()
@@ -12291,63 +12291,63 @@
.channels(channels)
.input_offset(112)
.zero_index(mz)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_MAGIC, c_eq_1) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_FMAGIC, c_eq_1) {
DWConvMicrokernelTester()
.cr(1)
.kr(9)
.channels(1)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x9__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x9__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_MAGIC, c_gt_1) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_FMAGIC, c_gt_1) {
for (uint32_t channels = 2; channels < 10; channels++) {
DWConvMicrokernelTester()
.cr(1)
.kr(9)
.channels(channels)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x9__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x9__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_MAGIC, c_gt_1_with_qmin) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_FMAGIC, c_gt_1_with_qmin) {
for (uint32_t channels = 2; channels < 10; channels++) {
DWConvMicrokernelTester()
.cr(1)
.kr(9)
.channels(channels)
.qmin(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x9__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x9__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_MAGIC, c_gt_1_with_qmax) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_FMAGIC, c_gt_1_with_qmax) {
for (uint32_t channels = 2; channels < 10; channels++) {
DWConvMicrokernelTester()
.cr(1)
.kr(9)
.channels(channels)
.qmax(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x9__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x9__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_MAGIC, multipixel) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_FMAGIC, multipixel) {
for (size_t channels = 1; channels <= 5; channels += 1) {
DWConvMicrokernelTester()
.cr(1)
.kr(9)
.channels(channels)
.width(3)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x9__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x9__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_MAGIC, multipixel_with_step) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_FMAGIC, multipixel_with_step) {
for (size_t channels = 1; channels <= 5; channels += 1) {
for (size_t step = 2; step <= 9; step++) {
DWConvMicrokernelTester()
@@ -12356,12 +12356,12 @@
.channels(channels)
.width(3)
.step(step)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x9__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x9__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_MAGIC, multipixel_with_output_stride) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_FMAGIC, multipixel_with_output_stride) {
for (size_t channels = 1; channels <= 5; channels += 1) {
DWConvMicrokernelTester()
.cr(1)
@@ -12369,11 +12369,11 @@
.channels(1)
.width(5)
.output_stride(7)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x9__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x9__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_MAGIC, multipixel_with_qmin) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_FMAGIC, multipixel_with_qmin) {
for (size_t channels = 1; channels <= 5; channels += 1) {
DWConvMicrokernelTester()
.cr(1)
@@ -12381,11 +12381,11 @@
.channels(channels)
.width(3)
.qmin(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x9__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x9__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_MAGIC, multipixel_with_qmax) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_FMAGIC, multipixel_with_qmax) {
for (size_t channels = 1; channels <= 5; channels += 1) {
DWConvMicrokernelTester()
.cr(1)
@@ -12393,22 +12393,22 @@
.channels(channels)
.width(3)
.qmax(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x9__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x9__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_MAGIC, input_offset) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_FMAGIC, input_offset) {
for (uint32_t channels = 2; channels < 16; channels += 3) {
DWConvMicrokernelTester()
.cr(1)
.kr(9)
.channels(channels)
.input_offset(48)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x9__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x9__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_MAGIC, zero) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_FMAGIC, zero) {
for (uint32_t mz = 0; mz < 9; mz++) {
for (uint32_t channels = 2; channels < 16; channels += 3) {
DWConvMicrokernelTester()
@@ -12417,105 +12417,105 @@
.channels(channels)
.input_offset(48)
.zero_index(mz)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x9__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x9__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_MAGIC, c_eq_2) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_FMAGIC, c_eq_2) {
DWConvMicrokernelTester()
.cr(2)
.kr(9)
.channels(2)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_MAGIC, c_div_2) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_FMAGIC, c_div_2) {
for (uint32_t channels = 4; channels < 32; channels += 6) {
DWConvMicrokernelTester()
.cr(2)
.kr(9)
.channels(channels)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_MAGIC, c_div_2_with_qmin) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_FMAGIC, c_div_2_with_qmin) {
for (uint32_t channels = 4; channels < 32; channels += 6) {
DWConvMicrokernelTester()
.cr(2)
.kr(9)
.channels(channels)
.qmin(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_MAGIC, c_div_2_with_qmax) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_FMAGIC, c_div_2_with_qmax) {
for (uint32_t channels = 4; channels < 32; channels += 6) {
DWConvMicrokernelTester()
.cr(2)
.kr(9)
.channels(channels)
.qmax(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_MAGIC, c_lt_2) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_FMAGIC, c_lt_2) {
for (uint32_t channels = 1; channels < 2; channels++) {
DWConvMicrokernelTester()
.cr(2)
.kr(9)
.channels(channels)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_MAGIC, c_gt_2) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_FMAGIC, c_gt_2) {
for (uint32_t channels = 3; channels < 4; channels++) {
DWConvMicrokernelTester()
.cr(2)
.kr(9)
.channels(channels)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_MAGIC, c_gt_2_with_qmin) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_FMAGIC, c_gt_2_with_qmin) {
for (uint32_t channels = 3; channels < 4; channels++) {
DWConvMicrokernelTester()
.cr(2)
.kr(9)
.channels(channels)
.qmin(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_MAGIC, c_gt_2_with_qmax) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_FMAGIC, c_gt_2_with_qmax) {
for (uint32_t channels = 3; channels < 4; channels++) {
DWConvMicrokernelTester()
.cr(2)
.kr(9)
.channels(channels)
.qmax(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_MAGIC, multipixel) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_FMAGIC, multipixel) {
for (size_t channels = 1; channels <= 10; channels += 1) {
DWConvMicrokernelTester()
.cr(2)
.kr(9)
.channels(channels)
.width(3)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_MAGIC, multipixel_with_step) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_FMAGIC, multipixel_with_step) {
for (size_t channels = 1; channels <= 10; channels += 1) {
for (size_t step = 2; step <= 9; step++) {
DWConvMicrokernelTester()
@@ -12524,12 +12524,12 @@
.channels(channels)
.width(3)
.step(step)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_MAGIC, multipixel_with_output_stride) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_FMAGIC, multipixel_with_output_stride) {
for (size_t channels = 1; channels <= 10; channels += 1) {
DWConvMicrokernelTester()
.cr(2)
@@ -12537,11 +12537,11 @@
.channels(2)
.width(5)
.output_stride(13)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_MAGIC, multipixel_with_qmin) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_FMAGIC, multipixel_with_qmin) {
for (size_t channels = 1; channels <= 10; channels += 1) {
DWConvMicrokernelTester()
.cr(2)
@@ -12549,11 +12549,11 @@
.channels(channels)
.width(3)
.qmin(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_MAGIC, multipixel_with_qmax) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_FMAGIC, multipixel_with_qmax) {
for (size_t channels = 1; channels <= 10; channels += 1) {
DWConvMicrokernelTester()
.cr(2)
@@ -12561,22 +12561,22 @@
.channels(channels)
.width(3)
.qmax(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_MAGIC, input_offset) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_FMAGIC, input_offset) {
for (uint32_t channels = 4; channels < 32; channels += 6) {
DWConvMicrokernelTester()
.cr(2)
.kr(9)
.channels(channels)
.input_offset(80)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_MAGIC, zero) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_FMAGIC, zero) {
for (uint32_t mz = 0; mz < 9; mz++) {
for (uint32_t channels = 4; channels < 32; channels += 6) {
DWConvMicrokernelTester()
@@ -12585,105 +12585,105 @@
.channels(channels)
.input_offset(80)
.zero_index(mz)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_MAGIC, c_eq_4) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_FMAGIC, c_eq_4) {
DWConvMicrokernelTester()
.cr(4)
.kr(9)
.channels(4)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_MAGIC, c_div_4) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_FMAGIC, c_div_4) {
for (uint32_t channels = 8; channels < 64; channels += 12) {
DWConvMicrokernelTester()
.cr(4)
.kr(9)
.channels(channels)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_MAGIC, c_div_4_with_qmin) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_FMAGIC, c_div_4_with_qmin) {
for (uint32_t channels = 8; channels < 64; channels += 12) {
DWConvMicrokernelTester()
.cr(4)
.kr(9)
.channels(channels)
.qmin(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_MAGIC, c_div_4_with_qmax) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_FMAGIC, c_div_4_with_qmax) {
for (uint32_t channels = 8; channels < 64; channels += 12) {
DWConvMicrokernelTester()
.cr(4)
.kr(9)
.channels(channels)
.qmax(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_MAGIC, c_lt_4) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_FMAGIC, c_lt_4) {
for (uint32_t channels = 1; channels < 4; channels++) {
DWConvMicrokernelTester()
.cr(4)
.kr(9)
.channels(channels)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_MAGIC, c_gt_4) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_FMAGIC, c_gt_4) {
for (uint32_t channels = 5; channels < 8; channels++) {
DWConvMicrokernelTester()
.cr(4)
.kr(9)
.channels(channels)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_MAGIC, c_gt_4_with_qmin) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_FMAGIC, c_gt_4_with_qmin) {
for (uint32_t channels = 5; channels < 8; channels++) {
DWConvMicrokernelTester()
.cr(4)
.kr(9)
.channels(channels)
.qmin(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_MAGIC, c_gt_4_with_qmax) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_FMAGIC, c_gt_4_with_qmax) {
for (uint32_t channels = 5; channels < 8; channels++) {
DWConvMicrokernelTester()
.cr(4)
.kr(9)
.channels(channels)
.qmax(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_MAGIC, multipixel) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_FMAGIC, multipixel) {
for (size_t channels = 1; channels <= 20; channels += 3) {
DWConvMicrokernelTester()
.cr(4)
.kr(9)
.channels(channels)
.width(3)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_MAGIC, multipixel_with_step) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_FMAGIC, multipixel_with_step) {
for (size_t channels = 1; channels <= 20; channels += 3) {
for (size_t step = 2; step <= 9; step++) {
DWConvMicrokernelTester()
@@ -12692,12 +12692,12 @@
.channels(channels)
.width(3)
.step(step)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_MAGIC, multipixel_with_output_stride) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_FMAGIC, multipixel_with_output_stride) {
for (size_t channels = 1; channels <= 20; channels += 3) {
DWConvMicrokernelTester()
.cr(4)
@@ -12705,11 +12705,11 @@
.channels(4)
.width(5)
.output_stride(23)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_MAGIC, multipixel_with_qmin) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_FMAGIC, multipixel_with_qmin) {
for (size_t channels = 1; channels <= 20; channels += 3) {
DWConvMicrokernelTester()
.cr(4)
@@ -12717,11 +12717,11 @@
.channels(channels)
.width(3)
.qmin(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_MAGIC, multipixel_with_qmax) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_FMAGIC, multipixel_with_qmax) {
for (size_t channels = 1; channels <= 20; channels += 3) {
DWConvMicrokernelTester()
.cr(4)
@@ -12729,22 +12729,22 @@
.channels(channels)
.width(3)
.qmax(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_MAGIC, input_offset) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_FMAGIC, input_offset) {
for (uint32_t channels = 8; channels < 64; channels += 12) {
DWConvMicrokernelTester()
.cr(4)
.kr(9)
.channels(channels)
.input_offset(112)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_MAGIC, zero) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_FMAGIC, zero) {
for (uint32_t mz = 0; mz < 9; mz++) {
for (uint32_t channels = 8; channels < 64; channels += 12) {
DWConvMicrokernelTester()
@@ -12753,7 +12753,7 @@
.channels(channels)
.input_offset(112)
.zero_index(mz)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
@@ -24572,58 +24572,58 @@
#endif // XNN_ARCH_WASMSIMD || XNN_ARCH_WASMRELAXEDSIMD
-TEST(QC8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINT, c_eq_1) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINTF, c_eq_1) {
DWConvMicrokernelTester()
.cr(1)
.kr(25)
.channels(1)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINT, c_gt_1) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINTF, c_gt_1) {
for (uint32_t channels = 2; channels < 10; channels++) {
DWConvMicrokernelTester()
.cr(1)
.kr(25)
.channels(channels)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINT, c_gt_1_with_qmin) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINTF, c_gt_1_with_qmin) {
for (uint32_t channels = 2; channels < 10; channels++) {
DWConvMicrokernelTester()
.cr(1)
.kr(25)
.channels(channels)
.qmin(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINT, c_gt_1_with_qmax) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINTF, c_gt_1_with_qmax) {
for (uint32_t channels = 2; channels < 10; channels++) {
DWConvMicrokernelTester()
.cr(1)
.kr(25)
.channels(channels)
.qmax(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINT, multipixel) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINTF, multipixel) {
for (size_t channels = 1; channels <= 5; channels += 1) {
DWConvMicrokernelTester()
.cr(1)
.kr(25)
.channels(channels)
.width(3)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINT, multipixel_with_step) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINTF, multipixel_with_step) {
for (size_t channels = 1; channels <= 5; channels += 1) {
for (size_t step = 2; step <= 25; step++) {
DWConvMicrokernelTester()
@@ -24632,12 +24632,12 @@
.channels(channels)
.width(3)
.step(step)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINT, multipixel_with_output_stride) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINTF, multipixel_with_output_stride) {
for (size_t channels = 1; channels <= 5; channels += 1) {
DWConvMicrokernelTester()
.cr(1)
@@ -24645,11 +24645,11 @@
.channels(1)
.width(5)
.output_stride(7)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINT, multipixel_with_qmin) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINTF, multipixel_with_qmin) {
for (size_t channels = 1; channels <= 5; channels += 1) {
DWConvMicrokernelTester()
.cr(1)
@@ -24657,11 +24657,11 @@
.channels(channels)
.width(3)
.qmin(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINT, multipixel_with_qmax) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINTF, multipixel_with_qmax) {
for (size_t channels = 1; channels <= 5; channels += 1) {
DWConvMicrokernelTester()
.cr(1)
@@ -24669,22 +24669,22 @@
.channels(channels)
.width(3)
.qmax(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINT, input_offset) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINTF, input_offset) {
for (uint32_t channels = 2; channels < 16; channels += 3) {
DWConvMicrokernelTester()
.cr(1)
.kr(25)
.channels(channels)
.input_offset(48)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINT, zero) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINTF, zero) {
for (uint32_t mz = 0; mz < 25; mz++) {
for (uint32_t channels = 2; channels < 16; channels += 3) {
DWConvMicrokernelTester()
@@ -24693,105 +24693,105 @@
.channels(channels)
.input_offset(48)
.zero_index(mz)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINT, c_eq_2) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINTF, c_eq_2) {
DWConvMicrokernelTester()
.cr(2)
.kr(25)
.channels(2)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINT, c_div_2) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINTF, c_div_2) {
for (uint32_t channels = 4; channels < 32; channels += 6) {
DWConvMicrokernelTester()
.cr(2)
.kr(25)
.channels(channels)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINT, c_div_2_with_qmin) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINTF, c_div_2_with_qmin) {
for (uint32_t channels = 4; channels < 32; channels += 6) {
DWConvMicrokernelTester()
.cr(2)
.kr(25)
.channels(channels)
.qmin(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINT, c_div_2_with_qmax) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINTF, c_div_2_with_qmax) {
for (uint32_t channels = 4; channels < 32; channels += 6) {
DWConvMicrokernelTester()
.cr(2)
.kr(25)
.channels(channels)
.qmax(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINT, c_lt_2) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINTF, c_lt_2) {
for (uint32_t channels = 1; channels < 2; channels++) {
DWConvMicrokernelTester()
.cr(2)
.kr(25)
.channels(channels)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINT, c_gt_2) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINTF, c_gt_2) {
for (uint32_t channels = 3; channels < 4; channels++) {
DWConvMicrokernelTester()
.cr(2)
.kr(25)
.channels(channels)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINT, c_gt_2_with_qmin) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINTF, c_gt_2_with_qmin) {
for (uint32_t channels = 3; channels < 4; channels++) {
DWConvMicrokernelTester()
.cr(2)
.kr(25)
.channels(channels)
.qmin(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINT, c_gt_2_with_qmax) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINTF, c_gt_2_with_qmax) {
for (uint32_t channels = 3; channels < 4; channels++) {
DWConvMicrokernelTester()
.cr(2)
.kr(25)
.channels(channels)
.qmax(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINT, multipixel) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINTF, multipixel) {
for (size_t channels = 1; channels <= 10; channels += 1) {
DWConvMicrokernelTester()
.cr(2)
.kr(25)
.channels(channels)
.width(3)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINT, multipixel_with_step) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINTF, multipixel_with_step) {
for (size_t channels = 1; channels <= 10; channels += 1) {
for (size_t step = 2; step <= 25; step++) {
DWConvMicrokernelTester()
@@ -24800,12 +24800,12 @@
.channels(channels)
.width(3)
.step(step)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINT, multipixel_with_output_stride) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINTF, multipixel_with_output_stride) {
for (size_t channels = 1; channels <= 10; channels += 1) {
DWConvMicrokernelTester()
.cr(2)
@@ -24813,11 +24813,11 @@
.channels(2)
.width(5)
.output_stride(13)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINT, multipixel_with_qmin) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINTF, multipixel_with_qmin) {
for (size_t channels = 1; channels <= 10; channels += 1) {
DWConvMicrokernelTester()
.cr(2)
@@ -24825,11 +24825,11 @@
.channels(channels)
.width(3)
.qmin(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINT, multipixel_with_qmax) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINTF, multipixel_with_qmax) {
for (size_t channels = 1; channels <= 10; channels += 1) {
DWConvMicrokernelTester()
.cr(2)
@@ -24837,22 +24837,22 @@
.channels(channels)
.width(3)
.qmax(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINT, input_offset) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINTF, input_offset) {
for (uint32_t channels = 4; channels < 32; channels += 6) {
DWConvMicrokernelTester()
.cr(2)
.kr(25)
.channels(channels)
.input_offset(80)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINT, zero) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINTF, zero) {
for (uint32_t mz = 0; mz < 25; mz++) {
for (uint32_t channels = 4; channels < 32; channels += 6) {
DWConvMicrokernelTester()
@@ -24861,105 +24861,105 @@
.channels(channels)
.input_offset(80)
.zero_index(mz)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINT, c_eq_4) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINTF, c_eq_4) {
DWConvMicrokernelTester()
.cr(4)
.kr(25)
.channels(4)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINT, c_div_4) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINTF, c_div_4) {
for (uint32_t channels = 8; channels < 64; channels += 12) {
DWConvMicrokernelTester()
.cr(4)
.kr(25)
.channels(channels)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINT, c_div_4_with_qmin) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINTF, c_div_4_with_qmin) {
for (uint32_t channels = 8; channels < 64; channels += 12) {
DWConvMicrokernelTester()
.cr(4)
.kr(25)
.channels(channels)
.qmin(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINT, c_div_4_with_qmax) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINTF, c_div_4_with_qmax) {
for (uint32_t channels = 8; channels < 64; channels += 12) {
DWConvMicrokernelTester()
.cr(4)
.kr(25)
.channels(channels)
.qmax(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINT, c_lt_4) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINTF, c_lt_4) {
for (uint32_t channels = 1; channels < 4; channels++) {
DWConvMicrokernelTester()
.cr(4)
.kr(25)
.channels(channels)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINT, c_gt_4) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINTF, c_gt_4) {
for (uint32_t channels = 5; channels < 8; channels++) {
DWConvMicrokernelTester()
.cr(4)
.kr(25)
.channels(channels)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINT, c_gt_4_with_qmin) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINTF, c_gt_4_with_qmin) {
for (uint32_t channels = 5; channels < 8; channels++) {
DWConvMicrokernelTester()
.cr(4)
.kr(25)
.channels(channels)
.qmin(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINT, c_gt_4_with_qmax) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINTF, c_gt_4_with_qmax) {
for (uint32_t channels = 5; channels < 8; channels++) {
DWConvMicrokernelTester()
.cr(4)
.kr(25)
.channels(channels)
.qmax(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINT, multipixel) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINTF, multipixel) {
for (size_t channels = 1; channels <= 20; channels += 3) {
DWConvMicrokernelTester()
.cr(4)
.kr(25)
.channels(channels)
.width(3)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINT, multipixel_with_step) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINTF, multipixel_with_step) {
for (size_t channels = 1; channels <= 20; channels += 3) {
for (size_t step = 2; step <= 25; step++) {
DWConvMicrokernelTester()
@@ -24968,12 +24968,12 @@
.channels(channels)
.width(3)
.step(step)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINT, multipixel_with_output_stride) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINTF, multipixel_with_output_stride) {
for (size_t channels = 1; channels <= 20; channels += 3) {
DWConvMicrokernelTester()
.cr(4)
@@ -24981,11 +24981,11 @@
.channels(4)
.width(5)
.output_stride(23)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINT, multipixel_with_qmin) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINTF, multipixel_with_qmin) {
for (size_t channels = 1; channels <= 20; channels += 3) {
DWConvMicrokernelTester()
.cr(4)
@@ -24993,11 +24993,11 @@
.channels(channels)
.width(3)
.qmin(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINT, multipixel_with_qmax) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINTF, multipixel_with_qmax) {
for (size_t channels = 1; channels <= 20; channels += 3) {
DWConvMicrokernelTester()
.cr(4)
@@ -25005,22 +25005,22 @@
.channels(channels)
.width(3)
.qmax(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINT, input_offset) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINTF, input_offset) {
for (uint32_t channels = 8; channels < 64; channels += 12) {
DWConvMicrokernelTester()
.cr(4)
.kr(25)
.channels(channels)
.input_offset(112)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINT, zero) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINTF, zero) {
for (uint32_t mz = 0; mz < 25; mz++) {
for (uint32_t channels = 8; channels < 64; channels += 12) {
DWConvMicrokernelTester()
@@ -25029,63 +25029,63 @@
.channels(channels)
.input_offset(112)
.zero_index(mz)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_MAGIC, c_eq_1) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_FMAGIC, c_eq_1) {
DWConvMicrokernelTester()
.cr(1)
.kr(25)
.channels(1)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x25__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x25__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_MAGIC, c_gt_1) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_FMAGIC, c_gt_1) {
for (uint32_t channels = 2; channels < 10; channels++) {
DWConvMicrokernelTester()
.cr(1)
.kr(25)
.channels(channels)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x25__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x25__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_MAGIC, c_gt_1_with_qmin) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_FMAGIC, c_gt_1_with_qmin) {
for (uint32_t channels = 2; channels < 10; channels++) {
DWConvMicrokernelTester()
.cr(1)
.kr(25)
.channels(channels)
.qmin(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x25__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x25__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_MAGIC, c_gt_1_with_qmax) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_FMAGIC, c_gt_1_with_qmax) {
for (uint32_t channels = 2; channels < 10; channels++) {
DWConvMicrokernelTester()
.cr(1)
.kr(25)
.channels(channels)
.qmax(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x25__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x25__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_MAGIC, multipixel) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_FMAGIC, multipixel) {
for (size_t channels = 1; channels <= 5; channels += 1) {
DWConvMicrokernelTester()
.cr(1)
.kr(25)
.channels(channels)
.width(3)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x25__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x25__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_MAGIC, multipixel_with_step) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_FMAGIC, multipixel_with_step) {
for (size_t channels = 1; channels <= 5; channels += 1) {
for (size_t step = 2; step <= 25; step++) {
DWConvMicrokernelTester()
@@ -25094,12 +25094,12 @@
.channels(channels)
.width(3)
.step(step)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x25__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x25__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_MAGIC, multipixel_with_output_stride) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_FMAGIC, multipixel_with_output_stride) {
for (size_t channels = 1; channels <= 5; channels += 1) {
DWConvMicrokernelTester()
.cr(1)
@@ -25107,11 +25107,11 @@
.channels(1)
.width(5)
.output_stride(7)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x25__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x25__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_MAGIC, multipixel_with_qmin) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_FMAGIC, multipixel_with_qmin) {
for (size_t channels = 1; channels <= 5; channels += 1) {
DWConvMicrokernelTester()
.cr(1)
@@ -25119,11 +25119,11 @@
.channels(channels)
.width(3)
.qmin(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x25__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x25__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_MAGIC, multipixel_with_qmax) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_FMAGIC, multipixel_with_qmax) {
for (size_t channels = 1; channels <= 5; channels += 1) {
DWConvMicrokernelTester()
.cr(1)
@@ -25131,22 +25131,22 @@
.channels(channels)
.width(3)
.qmax(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x25__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x25__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_MAGIC, input_offset) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_FMAGIC, input_offset) {
for (uint32_t channels = 2; channels < 16; channels += 3) {
DWConvMicrokernelTester()
.cr(1)
.kr(25)
.channels(channels)
.input_offset(48)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x25__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x25__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_MAGIC, zero) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_FMAGIC, zero) {
for (uint32_t mz = 0; mz < 25; mz++) {
for (uint32_t channels = 2; channels < 16; channels += 3) {
DWConvMicrokernelTester()
@@ -25155,105 +25155,105 @@
.channels(channels)
.input_offset(48)
.zero_index(mz)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x25__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up1x25__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_MAGIC, c_eq_2) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_FMAGIC, c_eq_2) {
DWConvMicrokernelTester()
.cr(2)
.kr(25)
.channels(2)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_MAGIC, c_div_2) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_FMAGIC, c_div_2) {
for (uint32_t channels = 4; channels < 32; channels += 6) {
DWConvMicrokernelTester()
.cr(2)
.kr(25)
.channels(channels)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_MAGIC, c_div_2_with_qmin) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_FMAGIC, c_div_2_with_qmin) {
for (uint32_t channels = 4; channels < 32; channels += 6) {
DWConvMicrokernelTester()
.cr(2)
.kr(25)
.channels(channels)
.qmin(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_MAGIC, c_div_2_with_qmax) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_FMAGIC, c_div_2_with_qmax) {
for (uint32_t channels = 4; channels < 32; channels += 6) {
DWConvMicrokernelTester()
.cr(2)
.kr(25)
.channels(channels)
.qmax(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_MAGIC, c_lt_2) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_FMAGIC, c_lt_2) {
for (uint32_t channels = 1; channels < 2; channels++) {
DWConvMicrokernelTester()
.cr(2)
.kr(25)
.channels(channels)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_MAGIC, c_gt_2) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_FMAGIC, c_gt_2) {
for (uint32_t channels = 3; channels < 4; channels++) {
DWConvMicrokernelTester()
.cr(2)
.kr(25)
.channels(channels)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_MAGIC, c_gt_2_with_qmin) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_FMAGIC, c_gt_2_with_qmin) {
for (uint32_t channels = 3; channels < 4; channels++) {
DWConvMicrokernelTester()
.cr(2)
.kr(25)
.channels(channels)
.qmin(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_MAGIC, c_gt_2_with_qmax) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_FMAGIC, c_gt_2_with_qmax) {
for (uint32_t channels = 3; channels < 4; channels++) {
DWConvMicrokernelTester()
.cr(2)
.kr(25)
.channels(channels)
.qmax(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_MAGIC, multipixel) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_FMAGIC, multipixel) {
for (size_t channels = 1; channels <= 10; channels += 1) {
DWConvMicrokernelTester()
.cr(2)
.kr(25)
.channels(channels)
.width(3)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_MAGIC, multipixel_with_step) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_FMAGIC, multipixel_with_step) {
for (size_t channels = 1; channels <= 10; channels += 1) {
for (size_t step = 2; step <= 25; step++) {
DWConvMicrokernelTester()
@@ -25262,12 +25262,12 @@
.channels(channels)
.width(3)
.step(step)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_MAGIC, multipixel_with_output_stride) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_FMAGIC, multipixel_with_output_stride) {
for (size_t channels = 1; channels <= 10; channels += 1) {
DWConvMicrokernelTester()
.cr(2)
@@ -25275,11 +25275,11 @@
.channels(2)
.width(5)
.output_stride(13)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_MAGIC, multipixel_with_qmin) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_FMAGIC, multipixel_with_qmin) {
for (size_t channels = 1; channels <= 10; channels += 1) {
DWConvMicrokernelTester()
.cr(2)
@@ -25287,11 +25287,11 @@
.channels(channels)
.width(3)
.qmin(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_MAGIC, multipixel_with_qmax) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_FMAGIC, multipixel_with_qmax) {
for (size_t channels = 1; channels <= 10; channels += 1) {
DWConvMicrokernelTester()
.cr(2)
@@ -25299,22 +25299,22 @@
.channels(channels)
.width(3)
.qmax(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_MAGIC, input_offset) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_FMAGIC, input_offset) {
for (uint32_t channels = 4; channels < 32; channels += 6) {
DWConvMicrokernelTester()
.cr(2)
.kr(25)
.channels(channels)
.input_offset(80)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_MAGIC, zero) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_FMAGIC, zero) {
for (uint32_t mz = 0; mz < 25; mz++) {
for (uint32_t channels = 4; channels < 32; channels += 6) {
DWConvMicrokernelTester()
@@ -25323,105 +25323,105 @@
.channels(channels)
.input_offset(80)
.zero_index(mz)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_MAGIC, c_eq_4) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_FMAGIC, c_eq_4) {
DWConvMicrokernelTester()
.cr(4)
.kr(25)
.channels(4)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_MAGIC, c_div_4) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_FMAGIC, c_div_4) {
for (uint32_t channels = 8; channels < 64; channels += 12) {
DWConvMicrokernelTester()
.cr(4)
.kr(25)
.channels(channels)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_MAGIC, c_div_4_with_qmin) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_FMAGIC, c_div_4_with_qmin) {
for (uint32_t channels = 8; channels < 64; channels += 12) {
DWConvMicrokernelTester()
.cr(4)
.kr(25)
.channels(channels)
.qmin(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_MAGIC, c_div_4_with_qmax) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_FMAGIC, c_div_4_with_qmax) {
for (uint32_t channels = 8; channels < 64; channels += 12) {
DWConvMicrokernelTester()
.cr(4)
.kr(25)
.channels(channels)
.qmax(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_MAGIC, c_lt_4) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_FMAGIC, c_lt_4) {
for (uint32_t channels = 1; channels < 4; channels++) {
DWConvMicrokernelTester()
.cr(4)
.kr(25)
.channels(channels)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_MAGIC, c_gt_4) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_FMAGIC, c_gt_4) {
for (uint32_t channels = 5; channels < 8; channels++) {
DWConvMicrokernelTester()
.cr(4)
.kr(25)
.channels(channels)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_MAGIC, c_gt_4_with_qmin) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_FMAGIC, c_gt_4_with_qmin) {
for (uint32_t channels = 5; channels < 8; channels++) {
DWConvMicrokernelTester()
.cr(4)
.kr(25)
.channels(channels)
.qmin(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_MAGIC, c_gt_4_with_qmax) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_FMAGIC, c_gt_4_with_qmax) {
for (uint32_t channels = 5; channels < 8; channels++) {
DWConvMicrokernelTester()
.cr(4)
.kr(25)
.channels(channels)
.qmax(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_MAGIC, multipixel) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_FMAGIC, multipixel) {
for (size_t channels = 1; channels <= 20; channels += 3) {
DWConvMicrokernelTester()
.cr(4)
.kr(25)
.channels(channels)
.width(3)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_MAGIC, multipixel_with_step) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_FMAGIC, multipixel_with_step) {
for (size_t channels = 1; channels <= 20; channels += 3) {
for (size_t step = 2; step <= 25; step++) {
DWConvMicrokernelTester()
@@ -25430,12 +25430,12 @@
.channels(channels)
.width(3)
.step(step)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_MAGIC, multipixel_with_output_stride) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_FMAGIC, multipixel_with_output_stride) {
for (size_t channels = 1; channels <= 20; channels += 3) {
DWConvMicrokernelTester()
.cr(4)
@@ -25443,11 +25443,11 @@
.channels(4)
.width(5)
.output_stride(23)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_MAGIC, multipixel_with_qmin) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_FMAGIC, multipixel_with_qmin) {
for (size_t channels = 1; channels <= 20; channels += 3) {
DWConvMicrokernelTester()
.cr(4)
@@ -25455,11 +25455,11 @@
.channels(channels)
.width(3)
.qmin(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_MAGIC, multipixel_with_qmax) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_FMAGIC, multipixel_with_qmax) {
for (size_t channels = 1; channels <= 20; channels += 3) {
DWConvMicrokernelTester()
.cr(4)
@@ -25467,22 +25467,22 @@
.channels(channels)
.width(3)
.qmax(128)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_MAGIC, input_offset) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_FMAGIC, input_offset) {
for (uint32_t channels = 8; channels < 64; channels += 12) {
DWConvMicrokernelTester()
.cr(4)
.kr(25)
.channels(channels)
.input_offset(112)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_MAGIC, zero) {
+TEST(QC8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_FMAGIC, zero) {
for (uint32_t mz = 0; mz < 25; mz++) {
for (uint32_t channels = 8; channels < 64; channels += 12) {
DWConvMicrokernelTester()
@@ -25491,7 +25491,7 @@
.channels(channels)
.input_offset(112)
.zero_index(mz)
- .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
\ No newline at end of file
diff --git a/test/qc8-dwconv-minmax-fp32.yaml b/test/qc8-dwconv-minmax-fp32.yaml
index cb8e753..b0f0bb2 100644
--- a/test/qc8-dwconv-minmax-fp32.yaml
+++ b/test/qc8-dwconv-minmax-fp32.yaml
@@ -131,18 +131,18 @@
init: xnn_init_qs8_minmax_wasmsimd_params
- name: xnn_qc8_dwconv_minmax_fp32_ukernel_up24x9__wasmsimd_mul16_add16
init: xnn_init_qs8_minmax_wasmsimd_params
-- name: xnn_qc8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrint
- init: xnn_init_qs8_minmax_scalar_lrint_params
-- name: xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint
- init: xnn_init_qs8_minmax_scalar_lrint_params
-- name: xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint
- init: xnn_init_qs8_minmax_scalar_lrint_params
-- name: xnn_qc8_dwconv_minmax_fp32_ukernel_up1x9__scalar_magic
- init: xnn_init_qs8_minmax_scalar_magic_params
-- name: xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_magic
- init: xnn_init_qs8_minmax_scalar_magic_params
-- name: xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_magic
- init: xnn_init_qs8_minmax_scalar_magic_params
+- name: xnn_qc8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrintf
+ init: xnn_init_qs8_minmax_scalar_lrintf_params
+- name: xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrintf
+ init: xnn_init_qs8_minmax_scalar_lrintf_params
+- name: xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrintf
+ init: xnn_init_qs8_minmax_scalar_lrintf_params
+- name: xnn_qc8_dwconv_minmax_fp32_ukernel_up1x9__scalar_fmagic
+ init: xnn_init_qs8_minmax_scalar_fmagic_params
+- name: xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic
+ init: xnn_init_qs8_minmax_scalar_fmagic_params
+- name: xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_fmagic
+ init: xnn_init_qs8_minmax_scalar_fmagic_params
- name: xnn_qc8_dwconv_minmax_fp32_ukernel_up8x25__neon_mul8_ld64
init: xnn_init_qs8_minmax_neon_params
- name: xnn_qc8_dwconv_minmax_fp32_ukernel_up16x25__neon_mul8_ld64
@@ -271,15 +271,15 @@
init: xnn_init_qs8_minmax_wasmsimd_params
- name: xnn_qc8_dwconv_minmax_fp32_ukernel_up24x25__wasmsimd_mul16_add16
init: xnn_init_qs8_minmax_wasmsimd_params
-- name: xnn_qc8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrint
- init: xnn_init_qs8_minmax_scalar_lrint_params
-- name: xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrint
- init: xnn_init_qs8_minmax_scalar_lrint_params
-- name: xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrint
- init: xnn_init_qs8_minmax_scalar_lrint_params
-- name: xnn_qc8_dwconv_minmax_fp32_ukernel_up1x25__scalar_magic
- init: xnn_init_qs8_minmax_scalar_magic_params
-- name: xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_magic
- init: xnn_init_qs8_minmax_scalar_magic_params
-- name: xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_magic
- init: xnn_init_qs8_minmax_scalar_magic_params
+- name: xnn_qc8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrintf
+ init: xnn_init_qs8_minmax_scalar_lrintf_params
+- name: xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrintf
+ init: xnn_init_qs8_minmax_scalar_lrintf_params
+- name: xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrintf
+ init: xnn_init_qs8_minmax_scalar_lrintf_params
+- name: xnn_qc8_dwconv_minmax_fp32_ukernel_up1x25__scalar_fmagic
+ init: xnn_init_qs8_minmax_scalar_fmagic_params
+- name: xnn_qc8_dwconv_minmax_fp32_ukernel_up2x25__scalar_fmagic
+ init: xnn_init_qs8_minmax_scalar_fmagic_params
+- name: xnn_qc8_dwconv_minmax_fp32_ukernel_up4x25__scalar_fmagic
+ init: xnn_init_qs8_minmax_scalar_fmagic_params
diff --git a/test/qc8-gemm-minmax-fp32.cc b/test/qc8-gemm-minmax-fp32.cc
index e665e91..80b2a7d 100644
--- a/test/qc8-gemm-minmax-fp32.cc
+++ b/test/qc8-gemm-minmax-fp32.cc
@@ -77180,7 +77180,7 @@
#endif // XNN_ARCH_WASMSIMD || XNN_ARCH_WASMRELAXEDSIMD
-TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, k_eq_1) {
+TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, k_eq_1) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -77189,10 +77189,10 @@
.m(1)
.n(2)
.k(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, strided_cn) {
+TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, strided_cn) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -77202,10 +77202,10 @@
.n(2)
.k(1)
.cn_stride(5)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, k_eq_1_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, k_eq_1_strided_a) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -77215,10 +77215,10 @@
.n(2)
.k(1)
.a_stride(3)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, k_eq_1_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
@@ -77230,12 +77230,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, k_eq_1_subtile_m) {
+TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 1; m++) {
GemmMicrokernelTester()
.mr(1)
@@ -77246,11 +77246,11 @@
.n(2)
.k(1)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, k_eq_1_subtile_n) {
+TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
.mr(1)
@@ -77261,11 +77261,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, k_gt_1) {
+TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(1)
@@ -77275,11 +77275,11 @@
.m(1)
.n(2)
.k(k)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, k_gt_1_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, k_gt_1_strided_a) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(1)
@@ -77290,11 +77290,11 @@
.n(2)
.k(k)
.a_stride(11)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, k_gt_1_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -77307,13 +77307,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, n_gt_2) {
+TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, n_gt_2) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -77324,12 +77324,12 @@
.m(1)
.n(2)
.k(k)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, n_gt_2_strided_cn) {
+TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, n_gt_2_strided_cn) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -77341,12 +77341,12 @@
.n(2)
.k(k)
.cn_stride(5)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, n_gt_2_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, n_gt_2_strided_a) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -77358,12 +77358,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, n_gt_2_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, n_gt_2_subtile) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
@@ -77376,13 +77376,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, n_div_2) {
+TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, n_div_2) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -77393,12 +77393,12 @@
.m(1)
.n(2)
.k(k)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, n_div_2_strided_cn) {
+TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, n_div_2_strided_cn) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -77410,12 +77410,12 @@
.n(n)
.k(k)
.cn_stride(5)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, n_div_2_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, n_div_2_strided_a) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -77427,12 +77427,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, n_div_2_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, n_div_2_subtile) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
@@ -77445,13 +77445,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, strided_cm_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -77465,13 +77465,13 @@
.k(k)
.cm_stride(5)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, qmin) {
+TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, qmin) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -77481,10 +77481,10 @@
.n(2)
.k(1)
.qmin(128)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, qmax) {
+TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, qmax) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -77494,10 +77494,10 @@
.n(2)
.k(1)
.qmax(128)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, strided_cm) {
+TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, strided_cm) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -77507,11 +77507,11 @@
.n(2)
.k(1)
.cm_stride(5)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, k_eq_1) {
+TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, k_eq_1) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -77520,10 +77520,10 @@
.m(2)
.n(2)
.k(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, strided_cn) {
+TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, strided_cn) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -77533,10 +77533,10 @@
.n(2)
.k(1)
.cn_stride(5)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, k_eq_1_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, k_eq_1_strided_a) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -77546,10 +77546,10 @@
.n(2)
.k(1)
.a_stride(3)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, k_eq_1_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
@@ -77561,12 +77561,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, k_eq_1_subtile_m) {
+TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 2; m++) {
GemmMicrokernelTester()
.mr(2)
@@ -77577,11 +77577,11 @@
.n(2)
.k(1)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, k_eq_1_subtile_n) {
+TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
.mr(2)
@@ -77592,11 +77592,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, k_gt_1) {
+TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(2)
@@ -77606,11 +77606,11 @@
.m(2)
.n(2)
.k(k)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, k_gt_1_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, k_gt_1_strided_a) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(2)
@@ -77621,11 +77621,11 @@
.n(2)
.k(k)
.a_stride(11)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, k_gt_1_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -77638,13 +77638,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, n_gt_2) {
+TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, n_gt_2) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -77655,12 +77655,12 @@
.m(2)
.n(2)
.k(k)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, n_gt_2_strided_cn) {
+TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, n_gt_2_strided_cn) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -77672,12 +77672,12 @@
.n(2)
.k(k)
.cn_stride(5)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, n_gt_2_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, n_gt_2_strided_a) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -77689,12 +77689,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, n_gt_2_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, n_gt_2_subtile) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
@@ -77707,13 +77707,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, n_div_2) {
+TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, n_div_2) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -77724,12 +77724,12 @@
.m(2)
.n(2)
.k(k)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, n_div_2_strided_cn) {
+TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, n_div_2_strided_cn) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -77741,12 +77741,12 @@
.n(n)
.k(k)
.cn_stride(5)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, n_div_2_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, n_div_2_strided_a) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -77758,12 +77758,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, n_div_2_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, n_div_2_subtile) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
@@ -77776,13 +77776,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, strided_cm_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -77796,13 +77796,13 @@
.k(k)
.cm_stride(5)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, qmin) {
+TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, qmin) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -77812,10 +77812,10 @@
.n(2)
.k(1)
.qmin(128)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, qmax) {
+TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, qmax) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -77825,10 +77825,10 @@
.n(2)
.k(1)
.qmax(128)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, strided_cm) {
+TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, strided_cm) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -77838,11 +77838,11 @@
.n(2)
.k(1)
.cm_stride(5)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, k_eq_1) {
+TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, k_eq_1) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -77851,10 +77851,10 @@
.m(3)
.n(2)
.k(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, strided_cn) {
+TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, strided_cn) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -77864,10 +77864,10 @@
.n(2)
.k(1)
.cn_stride(5)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, k_eq_1_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, k_eq_1_strided_a) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -77877,10 +77877,10 @@
.n(2)
.k(1)
.a_stride(3)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, k_eq_1_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
@@ -77892,12 +77892,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, k_eq_1_subtile_m) {
+TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 3; m++) {
GemmMicrokernelTester()
.mr(3)
@@ -77908,11 +77908,11 @@
.n(2)
.k(1)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, k_eq_1_subtile_n) {
+TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
.mr(3)
@@ -77923,11 +77923,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, k_gt_1) {
+TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(3)
@@ -77937,11 +77937,11 @@
.m(3)
.n(2)
.k(k)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, k_gt_1_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, k_gt_1_strided_a) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(3)
@@ -77952,11 +77952,11 @@
.n(2)
.k(k)
.a_stride(11)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, k_gt_1_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -77969,13 +77969,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, n_gt_2) {
+TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, n_gt_2) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -77986,12 +77986,12 @@
.m(3)
.n(2)
.k(k)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, n_gt_2_strided_cn) {
+TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, n_gt_2_strided_cn) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -78003,12 +78003,12 @@
.n(2)
.k(k)
.cn_stride(5)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, n_gt_2_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, n_gt_2_strided_a) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -78020,12 +78020,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, n_gt_2_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, n_gt_2_subtile) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
@@ -78038,13 +78038,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, n_div_2) {
+TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, n_div_2) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -78055,12 +78055,12 @@
.m(3)
.n(2)
.k(k)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, n_div_2_strided_cn) {
+TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, n_div_2_strided_cn) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -78072,12 +78072,12 @@
.n(n)
.k(k)
.cn_stride(5)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, n_div_2_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, n_div_2_strided_a) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -78089,12 +78089,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, n_div_2_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, n_div_2_subtile) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
@@ -78107,13 +78107,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, strided_cm_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -78127,13 +78127,13 @@
.k(k)
.cm_stride(5)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, qmin) {
+TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, qmin) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -78143,10 +78143,10 @@
.n(2)
.k(1)
.qmin(128)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, qmax) {
+TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, qmax) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -78156,10 +78156,10 @@
.n(2)
.k(1)
.qmax(128)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, strided_cm) {
+TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, strided_cm) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -78169,11 +78169,11 @@
.n(2)
.k(1)
.cm_stride(5)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, k_eq_1) {
+TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, k_eq_1) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -78182,10 +78182,10 @@
.m(4)
.n(2)
.k(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, strided_cn) {
+TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, strided_cn) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -78195,10 +78195,10 @@
.n(2)
.k(1)
.cn_stride(5)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, k_eq_1_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, k_eq_1_strided_a) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -78208,10 +78208,10 @@
.n(2)
.k(1)
.a_stride(3)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, k_eq_1_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
@@ -78223,12 +78223,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, k_eq_1_subtile_m) {
+TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 4; m++) {
GemmMicrokernelTester()
.mr(4)
@@ -78239,11 +78239,11 @@
.n(2)
.k(1)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, k_eq_1_subtile_n) {
+TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
.mr(4)
@@ -78254,11 +78254,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, k_gt_1) {
+TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(4)
@@ -78268,11 +78268,11 @@
.m(4)
.n(2)
.k(k)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, k_gt_1_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, k_gt_1_strided_a) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(4)
@@ -78283,11 +78283,11 @@
.n(2)
.k(k)
.a_stride(11)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, k_gt_1_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -78300,13 +78300,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, n_gt_2) {
+TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, n_gt_2) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -78317,12 +78317,12 @@
.m(4)
.n(2)
.k(k)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, n_gt_2_strided_cn) {
+TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, n_gt_2_strided_cn) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -78334,12 +78334,12 @@
.n(2)
.k(k)
.cn_stride(5)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, n_gt_2_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, n_gt_2_strided_a) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -78351,12 +78351,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, n_gt_2_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, n_gt_2_subtile) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
@@ -78369,13 +78369,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, n_div_2) {
+TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, n_div_2) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -78386,12 +78386,12 @@
.m(4)
.n(2)
.k(k)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, n_div_2_strided_cn) {
+TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, n_div_2_strided_cn) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -78403,12 +78403,12 @@
.n(n)
.k(k)
.cn_stride(5)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, n_div_2_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, n_div_2_strided_a) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -78420,12 +78420,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, n_div_2_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, n_div_2_subtile) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
@@ -78438,13 +78438,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, strided_cm_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -78458,13 +78458,13 @@
.k(k)
.cm_stride(5)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, qmin) {
+TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, qmin) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -78474,10 +78474,10 @@
.n(2)
.k(1)
.qmin(128)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, qmax) {
+TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, qmax) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -78487,10 +78487,10 @@
.n(2)
.k(1)
.qmax(128)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, strided_cm) {
+TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, strided_cm) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -78500,11 +78500,11 @@
.n(2)
.k(1)
.cm_stride(5)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, k_eq_1) {
+TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, k_eq_1) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -78513,10 +78513,10 @@
.m(1)
.n(4)
.k(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, strided_cn) {
+TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, strided_cn) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -78526,10 +78526,10 @@
.n(4)
.k(1)
.cn_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, k_eq_1_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, k_eq_1_strided_a) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -78539,10 +78539,10 @@
.n(4)
.k(1)
.a_stride(3)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, k_eq_1_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
@@ -78554,12 +78554,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, k_eq_1_subtile_m) {
+TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 1; m++) {
GemmMicrokernelTester()
.mr(1)
@@ -78570,11 +78570,11 @@
.n(4)
.k(1)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, k_eq_1_subtile_n) {
+TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
.mr(1)
@@ -78585,11 +78585,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, k_gt_1) {
+TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(1)
@@ -78599,11 +78599,11 @@
.m(1)
.n(4)
.k(k)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, k_gt_1_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, k_gt_1_strided_a) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(1)
@@ -78614,11 +78614,11 @@
.n(4)
.k(k)
.a_stride(11)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, k_gt_1_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -78631,13 +78631,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, n_gt_4) {
+TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, n_gt_4) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -78648,12 +78648,12 @@
.m(1)
.n(4)
.k(k)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, n_gt_4_strided_cn) {
+TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, n_gt_4_strided_cn) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -78665,12 +78665,12 @@
.n(4)
.k(k)
.cn_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, n_gt_4_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, n_gt_4_strided_a) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -78682,12 +78682,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, n_gt_4_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, n_gt_4_subtile) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
@@ -78700,13 +78700,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, n_div_4) {
+TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, n_div_4) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -78717,12 +78717,12 @@
.m(1)
.n(4)
.k(k)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, n_div_4_strided_cn) {
+TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, n_div_4_strided_cn) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -78734,12 +78734,12 @@
.n(n)
.k(k)
.cn_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, n_div_4_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, n_div_4_strided_a) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -78751,12 +78751,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, n_div_4_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, n_div_4_subtile) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
@@ -78769,13 +78769,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, strided_cm_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -78789,13 +78789,13 @@
.k(k)
.cm_stride(7)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, qmin) {
+TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, qmin) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -78805,10 +78805,10 @@
.n(4)
.k(1)
.qmin(128)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, qmax) {
+TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, qmax) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -78818,10 +78818,10 @@
.n(4)
.k(1)
.qmax(128)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, strided_cm) {
+TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, strided_cm) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -78831,11 +78831,11 @@
.n(4)
.k(1)
.cm_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, k_eq_1) {
+TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, k_eq_1) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -78844,10 +78844,10 @@
.m(2)
.n(4)
.k(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, strided_cn) {
+TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, strided_cn) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -78857,10 +78857,10 @@
.n(4)
.k(1)
.cn_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, k_eq_1_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, k_eq_1_strided_a) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -78870,10 +78870,10 @@
.n(4)
.k(1)
.a_stride(3)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, k_eq_1_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
@@ -78885,12 +78885,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, k_eq_1_subtile_m) {
+TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 2; m++) {
GemmMicrokernelTester()
.mr(2)
@@ -78901,11 +78901,11 @@
.n(4)
.k(1)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, k_eq_1_subtile_n) {
+TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
.mr(2)
@@ -78916,11 +78916,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, k_gt_1) {
+TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(2)
@@ -78930,11 +78930,11 @@
.m(2)
.n(4)
.k(k)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, k_gt_1_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, k_gt_1_strided_a) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(2)
@@ -78945,11 +78945,11 @@
.n(4)
.k(k)
.a_stride(11)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, k_gt_1_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -78962,13 +78962,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, n_gt_4) {
+TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, n_gt_4) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -78979,12 +78979,12 @@
.m(2)
.n(4)
.k(k)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, n_gt_4_strided_cn) {
+TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, n_gt_4_strided_cn) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -78996,12 +78996,12 @@
.n(4)
.k(k)
.cn_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, n_gt_4_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, n_gt_4_strided_a) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -79013,12 +79013,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, n_gt_4_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, n_gt_4_subtile) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
@@ -79031,13 +79031,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, n_div_4) {
+TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, n_div_4) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -79048,12 +79048,12 @@
.m(2)
.n(4)
.k(k)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, n_div_4_strided_cn) {
+TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, n_div_4_strided_cn) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -79065,12 +79065,12 @@
.n(n)
.k(k)
.cn_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, n_div_4_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, n_div_4_strided_a) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -79082,12 +79082,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, n_div_4_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, n_div_4_subtile) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
@@ -79100,13 +79100,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, strided_cm_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -79120,13 +79120,13 @@
.k(k)
.cm_stride(7)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, qmin) {
+TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, qmin) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -79136,10 +79136,10 @@
.n(4)
.k(1)
.qmin(128)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, qmax) {
+TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, qmax) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -79149,10 +79149,10 @@
.n(4)
.k(1)
.qmax(128)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, strided_cm) {
+TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, strided_cm) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -79162,11 +79162,11 @@
.n(4)
.k(1)
.cm_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, k_eq_1) {
+TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, k_eq_1) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -79175,10 +79175,10 @@
.m(3)
.n(4)
.k(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, strided_cn) {
+TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, strided_cn) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -79188,10 +79188,10 @@
.n(4)
.k(1)
.cn_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, k_eq_1_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, k_eq_1_strided_a) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -79201,10 +79201,10 @@
.n(4)
.k(1)
.a_stride(3)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, k_eq_1_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
@@ -79216,12 +79216,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, k_eq_1_subtile_m) {
+TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 3; m++) {
GemmMicrokernelTester()
.mr(3)
@@ -79232,11 +79232,11 @@
.n(4)
.k(1)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, k_eq_1_subtile_n) {
+TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
.mr(3)
@@ -79247,11 +79247,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, k_gt_1) {
+TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(3)
@@ -79261,11 +79261,11 @@
.m(3)
.n(4)
.k(k)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, k_gt_1_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, k_gt_1_strided_a) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(3)
@@ -79276,11 +79276,11 @@
.n(4)
.k(k)
.a_stride(11)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, k_gt_1_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -79293,13 +79293,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, n_gt_4) {
+TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, n_gt_4) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -79310,12 +79310,12 @@
.m(3)
.n(4)
.k(k)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, n_gt_4_strided_cn) {
+TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, n_gt_4_strided_cn) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -79327,12 +79327,12 @@
.n(4)
.k(k)
.cn_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, n_gt_4_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, n_gt_4_strided_a) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -79344,12 +79344,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, n_gt_4_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, n_gt_4_subtile) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
@@ -79362,13 +79362,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, n_div_4) {
+TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, n_div_4) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -79379,12 +79379,12 @@
.m(3)
.n(4)
.k(k)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, n_div_4_strided_cn) {
+TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, n_div_4_strided_cn) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -79396,12 +79396,12 @@
.n(n)
.k(k)
.cn_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, n_div_4_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, n_div_4_strided_a) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -79413,12 +79413,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, n_div_4_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, n_div_4_subtile) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
@@ -79431,13 +79431,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, strided_cm_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -79451,13 +79451,13 @@
.k(k)
.cm_stride(7)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, qmin) {
+TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, qmin) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -79467,10 +79467,10 @@
.n(4)
.k(1)
.qmin(128)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, qmax) {
+TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, qmax) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -79480,10 +79480,10 @@
.n(4)
.k(1)
.qmax(128)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, strided_cm) {
+TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, strided_cm) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -79493,11 +79493,11 @@
.n(4)
.k(1)
.cm_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, k_eq_1) {
+TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, k_eq_1) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -79506,10 +79506,10 @@
.m(4)
.n(4)
.k(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, strided_cn) {
+TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, strided_cn) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -79519,10 +79519,10 @@
.n(4)
.k(1)
.cn_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, k_eq_1_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, k_eq_1_strided_a) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -79532,10 +79532,10 @@
.n(4)
.k(1)
.a_stride(3)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, k_eq_1_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
@@ -79547,12 +79547,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, k_eq_1_subtile_m) {
+TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 4; m++) {
GemmMicrokernelTester()
.mr(4)
@@ -79563,11 +79563,11 @@
.n(4)
.k(1)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, k_eq_1_subtile_n) {
+TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
.mr(4)
@@ -79578,11 +79578,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, k_gt_1) {
+TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(4)
@@ -79592,11 +79592,11 @@
.m(4)
.n(4)
.k(k)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, k_gt_1_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, k_gt_1_strided_a) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(4)
@@ -79607,11 +79607,11 @@
.n(4)
.k(k)
.a_stride(11)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, k_gt_1_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -79624,13 +79624,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, n_gt_4) {
+TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, n_gt_4) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -79641,12 +79641,12 @@
.m(4)
.n(4)
.k(k)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, n_gt_4_strided_cn) {
+TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, n_gt_4_strided_cn) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -79658,12 +79658,12 @@
.n(4)
.k(k)
.cn_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, n_gt_4_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, n_gt_4_strided_a) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -79675,12 +79675,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, n_gt_4_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, n_gt_4_subtile) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
@@ -79693,13 +79693,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, n_div_4) {
+TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, n_div_4) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -79710,12 +79710,12 @@
.m(4)
.n(4)
.k(k)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, n_div_4_strided_cn) {
+TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, n_div_4_strided_cn) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -79727,12 +79727,12 @@
.n(n)
.k(k)
.cn_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, n_div_4_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, n_div_4_strided_a) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -79744,12 +79744,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, n_div_4_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, n_div_4_subtile) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
@@ -79762,13 +79762,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, strided_cm_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -79782,13 +79782,13 @@
.k(k)
.cm_stride(7)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, qmin) {
+TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, qmin) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -79798,10 +79798,10 @@
.n(4)
.k(1)
.qmin(128)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, qmax) {
+TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, qmax) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -79811,10 +79811,10 @@
.n(4)
.k(1)
.qmax(128)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, strided_cm) {
+TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, strided_cm) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -79824,11 +79824,11 @@
.n(4)
.k(1)
.cm_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, k_eq_1) {
+TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, k_eq_1) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -79837,10 +79837,10 @@
.m(1)
.n(2)
.k(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, strided_cn) {
+TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, strided_cn) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -79850,10 +79850,10 @@
.n(2)
.k(1)
.cn_stride(5)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, k_eq_1_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, k_eq_1_strided_a) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -79863,10 +79863,10 @@
.n(2)
.k(1)
.a_stride(3)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, k_eq_1_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
@@ -79878,12 +79878,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, k_eq_1_subtile_m) {
+TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 1; m++) {
GemmMicrokernelTester()
.mr(1)
@@ -79894,11 +79894,11 @@
.n(2)
.k(1)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, k_eq_1_subtile_n) {
+TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
.mr(1)
@@ -79909,11 +79909,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, k_gt_1) {
+TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(1)
@@ -79923,11 +79923,11 @@
.m(1)
.n(2)
.k(k)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, k_gt_1_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, k_gt_1_strided_a) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(1)
@@ -79938,11 +79938,11 @@
.n(2)
.k(k)
.a_stride(11)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, k_gt_1_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -79955,13 +79955,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, n_gt_2) {
+TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, n_gt_2) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -79972,12 +79972,12 @@
.m(1)
.n(2)
.k(k)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, n_gt_2_strided_cn) {
+TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, n_gt_2_strided_cn) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -79989,12 +79989,12 @@
.n(2)
.k(k)
.cn_stride(5)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, n_gt_2_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, n_gt_2_strided_a) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -80006,12 +80006,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, n_gt_2_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, n_gt_2_subtile) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
@@ -80024,13 +80024,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, n_div_2) {
+TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, n_div_2) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -80041,12 +80041,12 @@
.m(1)
.n(2)
.k(k)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, n_div_2_strided_cn) {
+TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, n_div_2_strided_cn) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -80058,12 +80058,12 @@
.n(n)
.k(k)
.cn_stride(5)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, n_div_2_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, n_div_2_strided_a) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -80075,12 +80075,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, n_div_2_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, n_div_2_subtile) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
@@ -80093,13 +80093,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, strided_cm_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -80113,13 +80113,13 @@
.k(k)
.cm_stride(5)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, qmin) {
+TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, qmin) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -80129,10 +80129,10 @@
.n(2)
.k(1)
.qmin(128)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, qmax) {
+TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, qmax) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -80142,10 +80142,10 @@
.n(2)
.k(1)
.qmax(128)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, strided_cm) {
+TEST(QC8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, strided_cm) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -80155,11 +80155,11 @@
.n(2)
.k(1)
.cm_stride(5)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, k_eq_1) {
+TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, k_eq_1) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -80168,10 +80168,10 @@
.m(2)
.n(2)
.k(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, strided_cn) {
+TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, strided_cn) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -80181,10 +80181,10 @@
.n(2)
.k(1)
.cn_stride(5)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, k_eq_1_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, k_eq_1_strided_a) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -80194,10 +80194,10 @@
.n(2)
.k(1)
.a_stride(3)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, k_eq_1_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
@@ -80209,12 +80209,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, k_eq_1_subtile_m) {
+TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 2; m++) {
GemmMicrokernelTester()
.mr(2)
@@ -80225,11 +80225,11 @@
.n(2)
.k(1)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, k_eq_1_subtile_n) {
+TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
.mr(2)
@@ -80240,11 +80240,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, k_gt_1) {
+TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(2)
@@ -80254,11 +80254,11 @@
.m(2)
.n(2)
.k(k)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, k_gt_1_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, k_gt_1_strided_a) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(2)
@@ -80269,11 +80269,11 @@
.n(2)
.k(k)
.a_stride(11)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, k_gt_1_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -80286,13 +80286,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, n_gt_2) {
+TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, n_gt_2) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -80303,12 +80303,12 @@
.m(2)
.n(2)
.k(k)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, n_gt_2_strided_cn) {
+TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, n_gt_2_strided_cn) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -80320,12 +80320,12 @@
.n(2)
.k(k)
.cn_stride(5)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, n_gt_2_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, n_gt_2_strided_a) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -80337,12 +80337,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, n_gt_2_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, n_gt_2_subtile) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
@@ -80355,13 +80355,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, n_div_2) {
+TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, n_div_2) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -80372,12 +80372,12 @@
.m(2)
.n(2)
.k(k)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, n_div_2_strided_cn) {
+TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, n_div_2_strided_cn) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -80389,12 +80389,12 @@
.n(n)
.k(k)
.cn_stride(5)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, n_div_2_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, n_div_2_strided_a) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -80406,12 +80406,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, n_div_2_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, n_div_2_subtile) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
@@ -80424,13 +80424,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, strided_cm_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -80444,13 +80444,13 @@
.k(k)
.cm_stride(5)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, qmin) {
+TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, qmin) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -80460,10 +80460,10 @@
.n(2)
.k(1)
.qmin(128)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, qmax) {
+TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, qmax) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -80473,10 +80473,10 @@
.n(2)
.k(1)
.qmax(128)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, strided_cm) {
+TEST(QC8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, strided_cm) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -80486,11 +80486,11 @@
.n(2)
.k(1)
.cm_stride(5)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, k_eq_1) {
+TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, k_eq_1) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -80499,10 +80499,10 @@
.m(3)
.n(2)
.k(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, strided_cn) {
+TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, strided_cn) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -80512,10 +80512,10 @@
.n(2)
.k(1)
.cn_stride(5)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, k_eq_1_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, k_eq_1_strided_a) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -80525,10 +80525,10 @@
.n(2)
.k(1)
.a_stride(3)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, k_eq_1_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
@@ -80540,12 +80540,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, k_eq_1_subtile_m) {
+TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 3; m++) {
GemmMicrokernelTester()
.mr(3)
@@ -80556,11 +80556,11 @@
.n(2)
.k(1)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, k_eq_1_subtile_n) {
+TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
.mr(3)
@@ -80571,11 +80571,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, k_gt_1) {
+TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(3)
@@ -80585,11 +80585,11 @@
.m(3)
.n(2)
.k(k)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, k_gt_1_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, k_gt_1_strided_a) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(3)
@@ -80600,11 +80600,11 @@
.n(2)
.k(k)
.a_stride(11)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, k_gt_1_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -80617,13 +80617,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, n_gt_2) {
+TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, n_gt_2) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -80634,12 +80634,12 @@
.m(3)
.n(2)
.k(k)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, n_gt_2_strided_cn) {
+TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, n_gt_2_strided_cn) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -80651,12 +80651,12 @@
.n(2)
.k(k)
.cn_stride(5)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, n_gt_2_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, n_gt_2_strided_a) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -80668,12 +80668,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, n_gt_2_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, n_gt_2_subtile) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
@@ -80686,13 +80686,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, n_div_2) {
+TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, n_div_2) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -80703,12 +80703,12 @@
.m(3)
.n(2)
.k(k)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, n_div_2_strided_cn) {
+TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, n_div_2_strided_cn) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -80720,12 +80720,12 @@
.n(n)
.k(k)
.cn_stride(5)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, n_div_2_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, n_div_2_strided_a) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -80737,12 +80737,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, n_div_2_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, n_div_2_subtile) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
@@ -80755,13 +80755,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, strided_cm_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -80775,13 +80775,13 @@
.k(k)
.cm_stride(5)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, qmin) {
+TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, qmin) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -80791,10 +80791,10 @@
.n(2)
.k(1)
.qmin(128)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, qmax) {
+TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, qmax) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -80804,10 +80804,10 @@
.n(2)
.k(1)
.qmax(128)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, strided_cm) {
+TEST(QC8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, strided_cm) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -80817,11 +80817,11 @@
.n(2)
.k(1)
.cm_stride(5)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, k_eq_1) {
+TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, k_eq_1) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -80830,10 +80830,10 @@
.m(4)
.n(2)
.k(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, strided_cn) {
+TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, strided_cn) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -80843,10 +80843,10 @@
.n(2)
.k(1)
.cn_stride(5)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, k_eq_1_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, k_eq_1_strided_a) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -80856,10 +80856,10 @@
.n(2)
.k(1)
.a_stride(3)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, k_eq_1_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
@@ -80871,12 +80871,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, k_eq_1_subtile_m) {
+TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 4; m++) {
GemmMicrokernelTester()
.mr(4)
@@ -80887,11 +80887,11 @@
.n(2)
.k(1)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, k_eq_1_subtile_n) {
+TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
.mr(4)
@@ -80902,11 +80902,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, k_gt_1) {
+TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(4)
@@ -80916,11 +80916,11 @@
.m(4)
.n(2)
.k(k)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, k_gt_1_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, k_gt_1_strided_a) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(4)
@@ -80931,11 +80931,11 @@
.n(2)
.k(k)
.a_stride(11)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, k_gt_1_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -80948,13 +80948,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, n_gt_2) {
+TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, n_gt_2) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -80965,12 +80965,12 @@
.m(4)
.n(2)
.k(k)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, n_gt_2_strided_cn) {
+TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, n_gt_2_strided_cn) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -80982,12 +80982,12 @@
.n(2)
.k(k)
.cn_stride(5)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, n_gt_2_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, n_gt_2_strided_a) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -80999,12 +80999,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, n_gt_2_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, n_gt_2_subtile) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
@@ -81017,13 +81017,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, n_div_2) {
+TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, n_div_2) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -81034,12 +81034,12 @@
.m(4)
.n(2)
.k(k)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, n_div_2_strided_cn) {
+TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, n_div_2_strided_cn) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -81051,12 +81051,12 @@
.n(n)
.k(k)
.cn_stride(5)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, n_div_2_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, n_div_2_strided_a) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -81068,12 +81068,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, n_div_2_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, n_div_2_subtile) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
@@ -81086,13 +81086,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, strided_cm_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -81106,13 +81106,13 @@
.k(k)
.cm_stride(5)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, qmin) {
+TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, qmin) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -81122,10 +81122,10 @@
.n(2)
.k(1)
.qmin(128)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, qmax) {
+TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, qmax) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -81135,10 +81135,10 @@
.n(2)
.k(1)
.qmax(128)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, strided_cm) {
+TEST(QC8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, strided_cm) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -81148,11 +81148,11 @@
.n(2)
.k(1)
.cm_stride(5)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, k_eq_1) {
+TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, k_eq_1) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -81161,10 +81161,10 @@
.m(1)
.n(4)
.k(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, strided_cn) {
+TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, strided_cn) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -81174,10 +81174,10 @@
.n(4)
.k(1)
.cn_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, k_eq_1_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, k_eq_1_strided_a) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -81187,10 +81187,10 @@
.n(4)
.k(1)
.a_stride(3)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, k_eq_1_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
@@ -81202,12 +81202,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, k_eq_1_subtile_m) {
+TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 1; m++) {
GemmMicrokernelTester()
.mr(1)
@@ -81218,11 +81218,11 @@
.n(4)
.k(1)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, k_eq_1_subtile_n) {
+TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
.mr(1)
@@ -81233,11 +81233,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, k_gt_1) {
+TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(1)
@@ -81247,11 +81247,11 @@
.m(1)
.n(4)
.k(k)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, k_gt_1_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, k_gt_1_strided_a) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(1)
@@ -81262,11 +81262,11 @@
.n(4)
.k(k)
.a_stride(11)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, k_gt_1_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -81279,13 +81279,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, n_gt_4) {
+TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, n_gt_4) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -81296,12 +81296,12 @@
.m(1)
.n(4)
.k(k)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, n_gt_4_strided_cn) {
+TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, n_gt_4_strided_cn) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -81313,12 +81313,12 @@
.n(4)
.k(k)
.cn_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, n_gt_4_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, n_gt_4_strided_a) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -81330,12 +81330,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, n_gt_4_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, n_gt_4_subtile) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
@@ -81348,13 +81348,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, n_div_4) {
+TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, n_div_4) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -81365,12 +81365,12 @@
.m(1)
.n(4)
.k(k)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, n_div_4_strided_cn) {
+TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, n_div_4_strided_cn) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -81382,12 +81382,12 @@
.n(n)
.k(k)
.cn_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, n_div_4_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, n_div_4_strided_a) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -81399,12 +81399,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, n_div_4_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, n_div_4_subtile) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
@@ -81417,13 +81417,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, strided_cm_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -81437,13 +81437,13 @@
.k(k)
.cm_stride(7)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, qmin) {
+TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, qmin) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -81453,10 +81453,10 @@
.n(4)
.k(1)
.qmin(128)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, qmax) {
+TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, qmax) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -81466,10 +81466,10 @@
.n(4)
.k(1)
.qmax(128)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, strided_cm) {
+TEST(QC8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, strided_cm) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -81479,11 +81479,11 @@
.n(4)
.k(1)
.cm_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, k_eq_1) {
+TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, k_eq_1) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -81492,10 +81492,10 @@
.m(2)
.n(4)
.k(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, strided_cn) {
+TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, strided_cn) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -81505,10 +81505,10 @@
.n(4)
.k(1)
.cn_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, k_eq_1_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, k_eq_1_strided_a) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -81518,10 +81518,10 @@
.n(4)
.k(1)
.a_stride(3)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, k_eq_1_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
@@ -81533,12 +81533,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, k_eq_1_subtile_m) {
+TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 2; m++) {
GemmMicrokernelTester()
.mr(2)
@@ -81549,11 +81549,11 @@
.n(4)
.k(1)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, k_eq_1_subtile_n) {
+TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
.mr(2)
@@ -81564,11 +81564,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, k_gt_1) {
+TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(2)
@@ -81578,11 +81578,11 @@
.m(2)
.n(4)
.k(k)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, k_gt_1_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, k_gt_1_strided_a) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(2)
@@ -81593,11 +81593,11 @@
.n(4)
.k(k)
.a_stride(11)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, k_gt_1_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -81610,13 +81610,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, n_gt_4) {
+TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, n_gt_4) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -81627,12 +81627,12 @@
.m(2)
.n(4)
.k(k)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, n_gt_4_strided_cn) {
+TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, n_gt_4_strided_cn) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -81644,12 +81644,12 @@
.n(4)
.k(k)
.cn_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, n_gt_4_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, n_gt_4_strided_a) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -81661,12 +81661,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, n_gt_4_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, n_gt_4_subtile) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
@@ -81679,13 +81679,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, n_div_4) {
+TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, n_div_4) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -81696,12 +81696,12 @@
.m(2)
.n(4)
.k(k)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, n_div_4_strided_cn) {
+TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, n_div_4_strided_cn) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -81713,12 +81713,12 @@
.n(n)
.k(k)
.cn_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, n_div_4_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, n_div_4_strided_a) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -81730,12 +81730,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, n_div_4_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, n_div_4_subtile) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
@@ -81748,13 +81748,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, strided_cm_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -81768,13 +81768,13 @@
.k(k)
.cm_stride(7)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, qmin) {
+TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, qmin) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -81784,10 +81784,10 @@
.n(4)
.k(1)
.qmin(128)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, qmax) {
+TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, qmax) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -81797,10 +81797,10 @@
.n(4)
.k(1)
.qmax(128)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, strided_cm) {
+TEST(QC8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, strided_cm) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -81810,11 +81810,11 @@
.n(4)
.k(1)
.cm_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, k_eq_1) {
+TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, k_eq_1) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -81823,10 +81823,10 @@
.m(3)
.n(4)
.k(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, strided_cn) {
+TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, strided_cn) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -81836,10 +81836,10 @@
.n(4)
.k(1)
.cn_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, k_eq_1_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, k_eq_1_strided_a) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -81849,10 +81849,10 @@
.n(4)
.k(1)
.a_stride(3)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, k_eq_1_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
@@ -81864,12 +81864,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, k_eq_1_subtile_m) {
+TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 3; m++) {
GemmMicrokernelTester()
.mr(3)
@@ -81880,11 +81880,11 @@
.n(4)
.k(1)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, k_eq_1_subtile_n) {
+TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
.mr(3)
@@ -81895,11 +81895,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, k_gt_1) {
+TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(3)
@@ -81909,11 +81909,11 @@
.m(3)
.n(4)
.k(k)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, k_gt_1_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, k_gt_1_strided_a) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(3)
@@ -81924,11 +81924,11 @@
.n(4)
.k(k)
.a_stride(11)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, k_gt_1_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -81941,13 +81941,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, n_gt_4) {
+TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, n_gt_4) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -81958,12 +81958,12 @@
.m(3)
.n(4)
.k(k)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, n_gt_4_strided_cn) {
+TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, n_gt_4_strided_cn) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -81975,12 +81975,12 @@
.n(4)
.k(k)
.cn_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, n_gt_4_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, n_gt_4_strided_a) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -81992,12 +81992,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, n_gt_4_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, n_gt_4_subtile) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
@@ -82010,13 +82010,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, n_div_4) {
+TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, n_div_4) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -82027,12 +82027,12 @@
.m(3)
.n(4)
.k(k)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, n_div_4_strided_cn) {
+TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, n_div_4_strided_cn) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -82044,12 +82044,12 @@
.n(n)
.k(k)
.cn_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, n_div_4_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, n_div_4_strided_a) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -82061,12 +82061,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, n_div_4_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, n_div_4_subtile) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
@@ -82079,13 +82079,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, strided_cm_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -82099,13 +82099,13 @@
.k(k)
.cm_stride(7)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, qmin) {
+TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, qmin) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -82115,10 +82115,10 @@
.n(4)
.k(1)
.qmin(128)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, qmax) {
+TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, qmax) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -82128,10 +82128,10 @@
.n(4)
.k(1)
.qmax(128)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, strided_cm) {
+TEST(QC8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, strided_cm) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -82141,11 +82141,11 @@
.n(4)
.k(1)
.cm_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, k_eq_1) {
+TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, k_eq_1) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -82154,10 +82154,10 @@
.m(4)
.n(4)
.k(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, strided_cn) {
+TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, strided_cn) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -82167,10 +82167,10 @@
.n(4)
.k(1)
.cn_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, k_eq_1_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, k_eq_1_strided_a) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -82180,10 +82180,10 @@
.n(4)
.k(1)
.a_stride(3)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, k_eq_1_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
@@ -82195,12 +82195,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, k_eq_1_subtile_m) {
+TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 4; m++) {
GemmMicrokernelTester()
.mr(4)
@@ -82211,11 +82211,11 @@
.n(4)
.k(1)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, k_eq_1_subtile_n) {
+TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
.mr(4)
@@ -82226,11 +82226,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, k_gt_1) {
+TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(4)
@@ -82240,11 +82240,11 @@
.m(4)
.n(4)
.k(k)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, k_gt_1_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, k_gt_1_strided_a) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(4)
@@ -82255,11 +82255,11 @@
.n(4)
.k(k)
.a_stride(11)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, k_gt_1_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -82272,13 +82272,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, n_gt_4) {
+TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, n_gt_4) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -82289,12 +82289,12 @@
.m(4)
.n(4)
.k(k)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, n_gt_4_strided_cn) {
+TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, n_gt_4_strided_cn) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -82306,12 +82306,12 @@
.n(4)
.k(k)
.cn_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, n_gt_4_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, n_gt_4_strided_a) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -82323,12 +82323,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, n_gt_4_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, n_gt_4_subtile) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
@@ -82341,13 +82341,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, n_div_4) {
+TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, n_div_4) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -82358,12 +82358,12 @@
.m(4)
.n(4)
.k(k)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, n_div_4_strided_cn) {
+TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, n_div_4_strided_cn) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -82375,12 +82375,12 @@
.n(n)
.k(k)
.cn_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, n_div_4_strided_a) {
+TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, n_div_4_strided_a) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -82392,12 +82392,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, n_div_4_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, n_div_4_subtile) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
@@ -82410,13 +82410,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, strided_cm_subtile) {
+TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -82430,13 +82430,13 @@
.k(k)
.cm_stride(7)
.iterations(1)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, qmin) {
+TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, qmin) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -82446,10 +82446,10 @@
.n(4)
.k(1)
.qmin(128)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, qmax) {
+TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, qmax) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -82459,10 +82459,10 @@
.n(4)
.k(1)
.qmax(128)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, strided_cm) {
+TEST(QC8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, strided_cm) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -82472,5 +82472,5 @@
.n(4)
.k(1)
.cm_stride(7)
- .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
diff --git a/test/qc8-gemm-minmax-fp32.yaml b/test/qc8-gemm-minmax-fp32.yaml
index 6f1d7d1..23e095c 100644
--- a/test/qc8-gemm-minmax-fp32.yaml
+++ b/test/qc8-gemm-minmax-fp32.yaml
@@ -539,51 +539,51 @@
- name: xnn_qc8_gemm_minmax_fp32_ukernel_3x4c8__wasmsimd_mul16_ld128
init: xnn_init_qs8_minmax_wasmsimd_params
k-block: 8
-- name: xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint
- init: xnn_init_qs8_minmax_scalar_lrint_params
+- name: xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf
+ init: xnn_init_qs8_minmax_scalar_lrintf_params
k-block: 1
-- name: xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint
- init: xnn_init_qs8_minmax_scalar_lrint_params
+- name: xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf
+ init: xnn_init_qs8_minmax_scalar_lrintf_params
k-block: 1
-- name: xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint
- init: xnn_init_qs8_minmax_scalar_lrint_params
+- name: xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf
+ init: xnn_init_qs8_minmax_scalar_lrintf_params
k-block: 1
-- name: xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint
- init: xnn_init_qs8_minmax_scalar_lrint_params
+- name: xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf
+ init: xnn_init_qs8_minmax_scalar_lrintf_params
k-block: 1
-- name: xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint
- init: xnn_init_qs8_minmax_scalar_lrint_params
+- name: xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf
+ init: xnn_init_qs8_minmax_scalar_lrintf_params
k-block: 1
-- name: xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint
- init: xnn_init_qs8_minmax_scalar_lrint_params
+- name: xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf
+ init: xnn_init_qs8_minmax_scalar_lrintf_params
k-block: 1
-- name: xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint
- init: xnn_init_qs8_minmax_scalar_lrint_params
+- name: xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf
+ init: xnn_init_qs8_minmax_scalar_lrintf_params
k-block: 1
-- name: xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint
- init: xnn_init_qs8_minmax_scalar_lrint_params
+- name: xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf
+ init: xnn_init_qs8_minmax_scalar_lrintf_params
k-block: 1
-- name: xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_magic
- init: xnn_init_qs8_minmax_scalar_magic_params
+- name: xnn_qc8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic
+ init: xnn_init_qs8_minmax_scalar_fmagic_params
k-block: 1
-- name: xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_magic
- init: xnn_init_qs8_minmax_scalar_magic_params
+- name: xnn_qc8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic
+ init: xnn_init_qs8_minmax_scalar_fmagic_params
k-block: 1
-- name: xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_magic
- init: xnn_init_qs8_minmax_scalar_magic_params
+- name: xnn_qc8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic
+ init: xnn_init_qs8_minmax_scalar_fmagic_params
k-block: 1
-- name: xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_magic
- init: xnn_init_qs8_minmax_scalar_magic_params
+- name: xnn_qc8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic
+ init: xnn_init_qs8_minmax_scalar_fmagic_params
k-block: 1
-- name: xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_magic
- init: xnn_init_qs8_minmax_scalar_magic_params
+- name: xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic
+ init: xnn_init_qs8_minmax_scalar_fmagic_params
k-block: 1
-- name: xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_magic
- init: xnn_init_qs8_minmax_scalar_magic_params
+- name: xnn_qc8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic
+ init: xnn_init_qs8_minmax_scalar_fmagic_params
k-block: 1
-- name: xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_magic
- init: xnn_init_qs8_minmax_scalar_magic_params
+- name: xnn_qc8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic
+ init: xnn_init_qs8_minmax_scalar_fmagic_params
k-block: 1
-- name: xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_magic
- init: xnn_init_qs8_minmax_scalar_magic_params
+- name: xnn_qc8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic
+ init: xnn_init_qs8_minmax_scalar_fmagic_params
k-block: 1
diff --git a/test/qc8-igemm-minmax-fp32.cc b/test/qc8-igemm-minmax-fp32.cc
index 4149050..de7517e 100644
--- a/test/qc8-igemm-minmax-fp32.cc
+++ b/test/qc8-igemm-minmax-fp32.cc
@@ -75029,7 +75029,7 @@
#endif // XNN_ARCH_WASMSIMD || XNN_ARCH_WASMRELAXEDSIMD
-TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, k_eq_1) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, k_eq_1) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -75038,10 +75038,10 @@
.m(1)
.n(2)
.k(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, strided_cn) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, strided_cn) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -75051,10 +75051,10 @@
.n(2)
.k(1)
.cn_stride(5)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, k_eq_1_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
@@ -75066,12 +75066,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, k_eq_1_subtile_m) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 1; m++) {
GemmMicrokernelTester()
.mr(1)
@@ -75082,11 +75082,11 @@
.n(2)
.k(1)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, k_eq_1_subtile_n) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
.mr(1)
@@ -75097,11 +75097,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, k_gt_1) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(1)
@@ -75111,11 +75111,11 @@
.m(1)
.n(2)
.k(k)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, k_gt_1_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -75128,13 +75128,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, n_gt_2) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, n_gt_2) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -75145,12 +75145,12 @@
.m(1)
.n(2)
.k(k)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, n_gt_2_strided_cn) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, n_gt_2_strided_cn) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -75162,12 +75162,12 @@
.n(2)
.k(k)
.cn_stride(5)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, n_gt_2_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, n_gt_2_subtile) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
@@ -75180,13 +75180,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, n_div_2) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, n_div_2) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -75197,12 +75197,12 @@
.m(1)
.n(2)
.k(k)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, n_div_2_strided_cn) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, n_div_2_strided_cn) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -75214,12 +75214,12 @@
.n(n)
.k(k)
.cn_stride(5)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, n_div_2_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, n_div_2_subtile) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
@@ -75232,13 +75232,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, small_kernel) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, small_kernel) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(1)
@@ -75249,11 +75249,11 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, small_kernel_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, small_kernel_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -75267,13 +75267,13 @@
.k(k)
.ks(3)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, n_gt_2_small_kernel) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, n_gt_2_small_kernel) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -75285,12 +75285,12 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, n_div_2_small_kernel) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, n_div_2_small_kernel) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -75302,12 +75302,12 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, strided_cm_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -75321,13 +75321,13 @@
.k(k)
.cm_stride(5)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, a_offset) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, a_offset) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(1)
@@ -75339,11 +75339,11 @@
.k(k)
.ks(3)
.a_offset(7)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, zero) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, zero) {
for (uint32_t mz = 0; mz < 1; mz++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -75357,12 +75357,12 @@
.ks(3)
.a_offset(7)
.zero_index(mz)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, qmin) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, qmin) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -75372,10 +75372,10 @@
.n(2)
.k(1)
.qmin(128)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, qmax) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, qmax) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -75385,10 +75385,10 @@
.n(2)
.k(1)
.qmax(128)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, strided_cm) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, strided_cm) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -75398,11 +75398,11 @@
.n(2)
.k(1)
.cm_stride(5)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, k_eq_1) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, k_eq_1) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -75411,10 +75411,10 @@
.m(2)
.n(2)
.k(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, strided_cn) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, strided_cn) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -75424,10 +75424,10 @@
.n(2)
.k(1)
.cn_stride(5)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, k_eq_1_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
@@ -75439,12 +75439,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, k_eq_1_subtile_m) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 2; m++) {
GemmMicrokernelTester()
.mr(2)
@@ -75455,11 +75455,11 @@
.n(2)
.k(1)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, k_eq_1_subtile_n) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
.mr(2)
@@ -75470,11 +75470,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, k_gt_1) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(2)
@@ -75484,11 +75484,11 @@
.m(2)
.n(2)
.k(k)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, k_gt_1_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -75501,13 +75501,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, n_gt_2) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, n_gt_2) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -75518,12 +75518,12 @@
.m(2)
.n(2)
.k(k)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, n_gt_2_strided_cn) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, n_gt_2_strided_cn) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -75535,12 +75535,12 @@
.n(2)
.k(k)
.cn_stride(5)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, n_gt_2_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, n_gt_2_subtile) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
@@ -75553,13 +75553,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, n_div_2) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, n_div_2) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -75570,12 +75570,12 @@
.m(2)
.n(2)
.k(k)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, n_div_2_strided_cn) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, n_div_2_strided_cn) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -75587,12 +75587,12 @@
.n(n)
.k(k)
.cn_stride(5)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, n_div_2_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, n_div_2_subtile) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
@@ -75605,13 +75605,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, small_kernel) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, small_kernel) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(2)
@@ -75622,11 +75622,11 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, small_kernel_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, small_kernel_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -75640,13 +75640,13 @@
.k(k)
.ks(3)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, n_gt_2_small_kernel) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, n_gt_2_small_kernel) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -75658,12 +75658,12 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, n_div_2_small_kernel) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, n_div_2_small_kernel) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -75675,12 +75675,12 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, strided_cm_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -75694,13 +75694,13 @@
.k(k)
.cm_stride(5)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, a_offset) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, a_offset) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(2)
@@ -75712,11 +75712,11 @@
.k(k)
.ks(3)
.a_offset(13)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, zero) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, zero) {
for (uint32_t mz = 0; mz < 2; mz++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -75730,12 +75730,12 @@
.ks(3)
.a_offset(13)
.zero_index(mz)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, qmin) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, qmin) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -75745,10 +75745,10 @@
.n(2)
.k(1)
.qmin(128)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, qmax) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, qmax) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -75758,10 +75758,10 @@
.n(2)
.k(1)
.qmax(128)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, strided_cm) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, strided_cm) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -75771,11 +75771,11 @@
.n(2)
.k(1)
.cm_stride(5)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, k_eq_1) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, k_eq_1) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -75784,10 +75784,10 @@
.m(3)
.n(2)
.k(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, strided_cn) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, strided_cn) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -75797,10 +75797,10 @@
.n(2)
.k(1)
.cn_stride(5)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, k_eq_1_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
@@ -75812,12 +75812,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, k_eq_1_subtile_m) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 3; m++) {
GemmMicrokernelTester()
.mr(3)
@@ -75828,11 +75828,11 @@
.n(2)
.k(1)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, k_eq_1_subtile_n) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
.mr(3)
@@ -75843,11 +75843,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, k_gt_1) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(3)
@@ -75857,11 +75857,11 @@
.m(3)
.n(2)
.k(k)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, k_gt_1_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -75874,13 +75874,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, n_gt_2) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, n_gt_2) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -75891,12 +75891,12 @@
.m(3)
.n(2)
.k(k)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, n_gt_2_strided_cn) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, n_gt_2_strided_cn) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -75908,12 +75908,12 @@
.n(2)
.k(k)
.cn_stride(5)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, n_gt_2_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, n_gt_2_subtile) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
@@ -75926,13 +75926,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, n_div_2) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, n_div_2) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -75943,12 +75943,12 @@
.m(3)
.n(2)
.k(k)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, n_div_2_strided_cn) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, n_div_2_strided_cn) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -75960,12 +75960,12 @@
.n(n)
.k(k)
.cn_stride(5)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, n_div_2_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, n_div_2_subtile) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
@@ -75978,13 +75978,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, small_kernel) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, small_kernel) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(3)
@@ -75995,11 +75995,11 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, small_kernel_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, small_kernel_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -76013,13 +76013,13 @@
.k(k)
.ks(3)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, n_gt_2_small_kernel) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, n_gt_2_small_kernel) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -76031,12 +76031,12 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, n_div_2_small_kernel) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, n_div_2_small_kernel) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -76048,12 +76048,12 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, strided_cm_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -76067,13 +76067,13 @@
.k(k)
.cm_stride(5)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, a_offset) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, a_offset) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(3)
@@ -76085,11 +76085,11 @@
.k(k)
.ks(3)
.a_offset(17)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, zero) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, zero) {
for (uint32_t mz = 0; mz < 3; mz++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -76103,12 +76103,12 @@
.ks(3)
.a_offset(17)
.zero_index(mz)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, qmin) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, qmin) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -76118,10 +76118,10 @@
.n(2)
.k(1)
.qmin(128)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, qmax) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, qmax) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -76131,10 +76131,10 @@
.n(2)
.k(1)
.qmax(128)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, strided_cm) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, strided_cm) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -76144,11 +76144,11 @@
.n(2)
.k(1)
.cm_stride(5)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, k_eq_1) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, k_eq_1) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -76157,10 +76157,10 @@
.m(4)
.n(2)
.k(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, strided_cn) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, strided_cn) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -76170,10 +76170,10 @@
.n(2)
.k(1)
.cn_stride(5)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, k_eq_1_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
@@ -76185,12 +76185,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, k_eq_1_subtile_m) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 4; m++) {
GemmMicrokernelTester()
.mr(4)
@@ -76201,11 +76201,11 @@
.n(2)
.k(1)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, k_eq_1_subtile_n) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
.mr(4)
@@ -76216,11 +76216,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, k_gt_1) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(4)
@@ -76230,11 +76230,11 @@
.m(4)
.n(2)
.k(k)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, k_gt_1_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -76247,13 +76247,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, n_gt_2) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, n_gt_2) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -76264,12 +76264,12 @@
.m(4)
.n(2)
.k(k)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, n_gt_2_strided_cn) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, n_gt_2_strided_cn) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -76281,12 +76281,12 @@
.n(2)
.k(k)
.cn_stride(5)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, n_gt_2_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, n_gt_2_subtile) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
@@ -76299,13 +76299,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, n_div_2) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, n_div_2) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -76316,12 +76316,12 @@
.m(4)
.n(2)
.k(k)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, n_div_2_strided_cn) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, n_div_2_strided_cn) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -76333,12 +76333,12 @@
.n(n)
.k(k)
.cn_stride(5)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, n_div_2_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, n_div_2_subtile) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
@@ -76351,13 +76351,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, small_kernel) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, small_kernel) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(4)
@@ -76368,11 +76368,11 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, small_kernel_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, small_kernel_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -76386,13 +76386,13 @@
.k(k)
.ks(3)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, n_gt_2_small_kernel) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, n_gt_2_small_kernel) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -76404,12 +76404,12 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, n_div_2_small_kernel) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, n_div_2_small_kernel) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -76421,12 +76421,12 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, strided_cm_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -76440,13 +76440,13 @@
.k(k)
.cm_stride(5)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, a_offset) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, a_offset) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(4)
@@ -76458,11 +76458,11 @@
.k(k)
.ks(3)
.a_offset(23)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, zero) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, zero) {
for (uint32_t mz = 0; mz < 4; mz++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -76476,12 +76476,12 @@
.ks(3)
.a_offset(23)
.zero_index(mz)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, qmin) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, qmin) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -76491,10 +76491,10 @@
.n(2)
.k(1)
.qmin(128)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, qmax) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, qmax) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -76504,10 +76504,10 @@
.n(2)
.k(1)
.qmax(128)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, strided_cm) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, strided_cm) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -76517,11 +76517,11 @@
.n(2)
.k(1)
.cm_stride(5)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, k_eq_1) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, k_eq_1) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -76530,10 +76530,10 @@
.m(1)
.n(4)
.k(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, strided_cn) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, strided_cn) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -76543,10 +76543,10 @@
.n(4)
.k(1)
.cn_stride(7)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, k_eq_1_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
@@ -76558,12 +76558,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, k_eq_1_subtile_m) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 1; m++) {
GemmMicrokernelTester()
.mr(1)
@@ -76574,11 +76574,11 @@
.n(4)
.k(1)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, k_eq_1_subtile_n) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
.mr(1)
@@ -76589,11 +76589,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, k_gt_1) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(1)
@@ -76603,11 +76603,11 @@
.m(1)
.n(4)
.k(k)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, k_gt_1_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -76620,13 +76620,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, n_gt_4) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, n_gt_4) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -76637,12 +76637,12 @@
.m(1)
.n(4)
.k(k)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, n_gt_4_strided_cn) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, n_gt_4_strided_cn) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -76654,12 +76654,12 @@
.n(4)
.k(k)
.cn_stride(7)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, n_gt_4_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, n_gt_4_subtile) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
@@ -76672,13 +76672,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, n_div_4) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, n_div_4) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -76689,12 +76689,12 @@
.m(1)
.n(4)
.k(k)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, n_div_4_strided_cn) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, n_div_4_strided_cn) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -76706,12 +76706,12 @@
.n(n)
.k(k)
.cn_stride(7)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, n_div_4_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, n_div_4_subtile) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
@@ -76724,13 +76724,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, small_kernel) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, small_kernel) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(1)
@@ -76741,11 +76741,11 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, small_kernel_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, small_kernel_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -76759,13 +76759,13 @@
.k(k)
.ks(3)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, n_gt_4_small_kernel) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, n_gt_4_small_kernel) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -76777,12 +76777,12 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, n_div_4_small_kernel) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, n_div_4_small_kernel) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -76794,12 +76794,12 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, strided_cm_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -76813,13 +76813,13 @@
.k(k)
.cm_stride(7)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, a_offset) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, a_offset) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(1)
@@ -76831,11 +76831,11 @@
.k(k)
.ks(3)
.a_offset(7)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, zero) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, zero) {
for (uint32_t mz = 0; mz < 1; mz++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -76849,12 +76849,12 @@
.ks(3)
.a_offset(7)
.zero_index(mz)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, qmin) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, qmin) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -76864,10 +76864,10 @@
.n(4)
.k(1)
.qmin(128)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, qmax) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, qmax) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -76877,10 +76877,10 @@
.n(4)
.k(1)
.qmax(128)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, strided_cm) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, strided_cm) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -76890,11 +76890,11 @@
.n(4)
.k(1)
.cm_stride(7)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, k_eq_1) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, k_eq_1) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -76903,10 +76903,10 @@
.m(2)
.n(4)
.k(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, strided_cn) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, strided_cn) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -76916,10 +76916,10 @@
.n(4)
.k(1)
.cn_stride(7)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, k_eq_1_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
@@ -76931,12 +76931,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, k_eq_1_subtile_m) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 2; m++) {
GemmMicrokernelTester()
.mr(2)
@@ -76947,11 +76947,11 @@
.n(4)
.k(1)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, k_eq_1_subtile_n) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
.mr(2)
@@ -76962,11 +76962,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, k_gt_1) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(2)
@@ -76976,11 +76976,11 @@
.m(2)
.n(4)
.k(k)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, k_gt_1_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -76993,13 +76993,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, n_gt_4) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, n_gt_4) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -77010,12 +77010,12 @@
.m(2)
.n(4)
.k(k)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, n_gt_4_strided_cn) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, n_gt_4_strided_cn) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -77027,12 +77027,12 @@
.n(4)
.k(k)
.cn_stride(7)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, n_gt_4_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, n_gt_4_subtile) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
@@ -77045,13 +77045,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, n_div_4) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, n_div_4) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -77062,12 +77062,12 @@
.m(2)
.n(4)
.k(k)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, n_div_4_strided_cn) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, n_div_4_strided_cn) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -77079,12 +77079,12 @@
.n(n)
.k(k)
.cn_stride(7)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, n_div_4_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, n_div_4_subtile) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
@@ -77097,13 +77097,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, small_kernel) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, small_kernel) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(2)
@@ -77114,11 +77114,11 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, small_kernel_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, small_kernel_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -77132,13 +77132,13 @@
.k(k)
.ks(3)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, n_gt_4_small_kernel) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, n_gt_4_small_kernel) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -77150,12 +77150,12 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, n_div_4_small_kernel) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, n_div_4_small_kernel) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -77167,12 +77167,12 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, strided_cm_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -77186,13 +77186,13 @@
.k(k)
.cm_stride(7)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, a_offset) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, a_offset) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(2)
@@ -77204,11 +77204,11 @@
.k(k)
.ks(3)
.a_offset(13)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, zero) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, zero) {
for (uint32_t mz = 0; mz < 2; mz++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -77222,12 +77222,12 @@
.ks(3)
.a_offset(13)
.zero_index(mz)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, qmin) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, qmin) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -77237,10 +77237,10 @@
.n(4)
.k(1)
.qmin(128)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, qmax) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, qmax) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -77250,10 +77250,10 @@
.n(4)
.k(1)
.qmax(128)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, strided_cm) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, strided_cm) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -77263,11 +77263,11 @@
.n(4)
.k(1)
.cm_stride(7)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, k_eq_1) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, k_eq_1) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -77276,10 +77276,10 @@
.m(3)
.n(4)
.k(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, strided_cn) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, strided_cn) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -77289,10 +77289,10 @@
.n(4)
.k(1)
.cn_stride(7)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, k_eq_1_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
@@ -77304,12 +77304,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, k_eq_1_subtile_m) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 3; m++) {
GemmMicrokernelTester()
.mr(3)
@@ -77320,11 +77320,11 @@
.n(4)
.k(1)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, k_eq_1_subtile_n) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
.mr(3)
@@ -77335,11 +77335,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, k_gt_1) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(3)
@@ -77349,11 +77349,11 @@
.m(3)
.n(4)
.k(k)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, k_gt_1_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -77366,13 +77366,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, n_gt_4) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, n_gt_4) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -77383,12 +77383,12 @@
.m(3)
.n(4)
.k(k)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, n_gt_4_strided_cn) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, n_gt_4_strided_cn) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -77400,12 +77400,12 @@
.n(4)
.k(k)
.cn_stride(7)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, n_gt_4_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, n_gt_4_subtile) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
@@ -77418,13 +77418,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, n_div_4) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, n_div_4) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -77435,12 +77435,12 @@
.m(3)
.n(4)
.k(k)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, n_div_4_strided_cn) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, n_div_4_strided_cn) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -77452,12 +77452,12 @@
.n(n)
.k(k)
.cn_stride(7)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, n_div_4_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, n_div_4_subtile) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
@@ -77470,13 +77470,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, small_kernel) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, small_kernel) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(3)
@@ -77487,11 +77487,11 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, small_kernel_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, small_kernel_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -77505,13 +77505,13 @@
.k(k)
.ks(3)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, n_gt_4_small_kernel) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, n_gt_4_small_kernel) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -77523,12 +77523,12 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, n_div_4_small_kernel) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, n_div_4_small_kernel) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -77540,12 +77540,12 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, strided_cm_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -77559,13 +77559,13 @@
.k(k)
.cm_stride(7)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, a_offset) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, a_offset) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(3)
@@ -77577,11 +77577,11 @@
.k(k)
.ks(3)
.a_offset(17)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, zero) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, zero) {
for (uint32_t mz = 0; mz < 3; mz++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -77595,12 +77595,12 @@
.ks(3)
.a_offset(17)
.zero_index(mz)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, qmin) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, qmin) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -77610,10 +77610,10 @@
.n(4)
.k(1)
.qmin(128)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, qmax) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, qmax) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -77623,10 +77623,10 @@
.n(4)
.k(1)
.qmax(128)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, strided_cm) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, strided_cm) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -77636,11 +77636,11 @@
.n(4)
.k(1)
.cm_stride(7)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, k_eq_1) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, k_eq_1) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -77649,10 +77649,10 @@
.m(4)
.n(4)
.k(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, strided_cn) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, strided_cn) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -77662,10 +77662,10 @@
.n(4)
.k(1)
.cn_stride(7)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, k_eq_1_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
@@ -77677,12 +77677,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, k_eq_1_subtile_m) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 4; m++) {
GemmMicrokernelTester()
.mr(4)
@@ -77693,11 +77693,11 @@
.n(4)
.k(1)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, k_eq_1_subtile_n) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
.mr(4)
@@ -77708,11 +77708,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, k_gt_1) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(4)
@@ -77722,11 +77722,11 @@
.m(4)
.n(4)
.k(k)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, k_gt_1_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -77739,13 +77739,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, n_gt_4) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, n_gt_4) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -77756,12 +77756,12 @@
.m(4)
.n(4)
.k(k)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, n_gt_4_strided_cn) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, n_gt_4_strided_cn) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -77773,12 +77773,12 @@
.n(4)
.k(k)
.cn_stride(7)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, n_gt_4_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, n_gt_4_subtile) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
@@ -77791,13 +77791,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, n_div_4) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, n_div_4) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -77808,12 +77808,12 @@
.m(4)
.n(4)
.k(k)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, n_div_4_strided_cn) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, n_div_4_strided_cn) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -77825,12 +77825,12 @@
.n(n)
.k(k)
.cn_stride(7)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, n_div_4_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, n_div_4_subtile) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
@@ -77843,13 +77843,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, small_kernel) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, small_kernel) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(4)
@@ -77860,11 +77860,11 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, small_kernel_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, small_kernel_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -77878,13 +77878,13 @@
.k(k)
.ks(3)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, n_gt_4_small_kernel) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, n_gt_4_small_kernel) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -77896,12 +77896,12 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, n_div_4_small_kernel) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, n_div_4_small_kernel) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -77913,12 +77913,12 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, strided_cm_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -77932,13 +77932,13 @@
.k(k)
.cm_stride(7)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, a_offset) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, a_offset) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(4)
@@ -77950,11 +77950,11 @@
.k(k)
.ks(3)
.a_offset(23)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, zero) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, zero) {
for (uint32_t mz = 0; mz < 4; mz++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -77968,12 +77968,12 @@
.ks(3)
.a_offset(23)
.zero_index(mz)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, qmin) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, qmin) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -77983,10 +77983,10 @@
.n(4)
.k(1)
.qmin(128)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, qmax) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, qmax) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -77996,10 +77996,10 @@
.n(4)
.k(1)
.qmax(128)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, strided_cm) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, strided_cm) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -78009,11 +78009,11 @@
.n(4)
.k(1)
.cm_stride(7)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_minmax_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_minmax_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, k_eq_1) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, k_eq_1) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -78022,10 +78022,10 @@
.m(1)
.n(2)
.k(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, strided_cn) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, strided_cn) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -78035,10 +78035,10 @@
.n(2)
.k(1)
.cn_stride(5)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, k_eq_1_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
@@ -78050,12 +78050,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, k_eq_1_subtile_m) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 1; m++) {
GemmMicrokernelTester()
.mr(1)
@@ -78066,11 +78066,11 @@
.n(2)
.k(1)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, k_eq_1_subtile_n) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
.mr(1)
@@ -78081,11 +78081,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, k_gt_1) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(1)
@@ -78095,11 +78095,11 @@
.m(1)
.n(2)
.k(k)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, k_gt_1_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -78112,13 +78112,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, n_gt_2) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, n_gt_2) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -78129,12 +78129,12 @@
.m(1)
.n(2)
.k(k)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, n_gt_2_strided_cn) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, n_gt_2_strided_cn) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -78146,12 +78146,12 @@
.n(2)
.k(k)
.cn_stride(5)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, n_gt_2_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, n_gt_2_subtile) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
@@ -78164,13 +78164,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, n_div_2) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, n_div_2) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -78181,12 +78181,12 @@
.m(1)
.n(2)
.k(k)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, n_div_2_strided_cn) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, n_div_2_strided_cn) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -78198,12 +78198,12 @@
.n(n)
.k(k)
.cn_stride(5)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, n_div_2_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, n_div_2_subtile) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
@@ -78216,13 +78216,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, small_kernel) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, small_kernel) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(1)
@@ -78233,11 +78233,11 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, small_kernel_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, small_kernel_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -78251,13 +78251,13 @@
.k(k)
.ks(3)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, n_gt_2_small_kernel) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, n_gt_2_small_kernel) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -78269,12 +78269,12 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, n_div_2_small_kernel) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, n_div_2_small_kernel) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -78286,12 +78286,12 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, strided_cm_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -78305,13 +78305,13 @@
.k(k)
.cm_stride(5)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, a_offset) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, a_offset) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(1)
@@ -78323,11 +78323,11 @@
.k(k)
.ks(3)
.a_offset(7)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, zero) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, zero) {
for (uint32_t mz = 0; mz < 1; mz++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -78341,12 +78341,12 @@
.ks(3)
.a_offset(7)
.zero_index(mz)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, qmin) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, qmin) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -78356,10 +78356,10 @@
.n(2)
.k(1)
.qmin(128)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, qmax) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, qmax) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -78369,10 +78369,10 @@
.n(2)
.k(1)
.qmax(128)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, strided_cm) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, strided_cm) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -78382,11 +78382,11 @@
.n(2)
.k(1)
.cm_stride(5)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, k_eq_1) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, k_eq_1) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -78395,10 +78395,10 @@
.m(2)
.n(2)
.k(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, strided_cn) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, strided_cn) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -78408,10 +78408,10 @@
.n(2)
.k(1)
.cn_stride(5)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, k_eq_1_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
@@ -78423,12 +78423,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, k_eq_1_subtile_m) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 2; m++) {
GemmMicrokernelTester()
.mr(2)
@@ -78439,11 +78439,11 @@
.n(2)
.k(1)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, k_eq_1_subtile_n) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
.mr(2)
@@ -78454,11 +78454,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, k_gt_1) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(2)
@@ -78468,11 +78468,11 @@
.m(2)
.n(2)
.k(k)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, k_gt_1_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -78485,13 +78485,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, n_gt_2) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, n_gt_2) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -78502,12 +78502,12 @@
.m(2)
.n(2)
.k(k)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, n_gt_2_strided_cn) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, n_gt_2_strided_cn) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -78519,12 +78519,12 @@
.n(2)
.k(k)
.cn_stride(5)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, n_gt_2_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, n_gt_2_subtile) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
@@ -78537,13 +78537,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, n_div_2) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, n_div_2) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -78554,12 +78554,12 @@
.m(2)
.n(2)
.k(k)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, n_div_2_strided_cn) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, n_div_2_strided_cn) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -78571,12 +78571,12 @@
.n(n)
.k(k)
.cn_stride(5)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, n_div_2_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, n_div_2_subtile) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
@@ -78589,13 +78589,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, small_kernel) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, small_kernel) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(2)
@@ -78606,11 +78606,11 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, small_kernel_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, small_kernel_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -78624,13 +78624,13 @@
.k(k)
.ks(3)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, n_gt_2_small_kernel) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, n_gt_2_small_kernel) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -78642,12 +78642,12 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, n_div_2_small_kernel) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, n_div_2_small_kernel) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -78659,12 +78659,12 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, strided_cm_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -78678,13 +78678,13 @@
.k(k)
.cm_stride(5)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, a_offset) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, a_offset) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(2)
@@ -78696,11 +78696,11 @@
.k(k)
.ks(3)
.a_offset(13)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, zero) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, zero) {
for (uint32_t mz = 0; mz < 2; mz++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -78714,12 +78714,12 @@
.ks(3)
.a_offset(13)
.zero_index(mz)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, qmin) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, qmin) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -78729,10 +78729,10 @@
.n(2)
.k(1)
.qmin(128)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, qmax) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, qmax) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -78742,10 +78742,10 @@
.n(2)
.k(1)
.qmax(128)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, strided_cm) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, strided_cm) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -78755,11 +78755,11 @@
.n(2)
.k(1)
.cm_stride(5)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, k_eq_1) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, k_eq_1) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -78768,10 +78768,10 @@
.m(3)
.n(2)
.k(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, strided_cn) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, strided_cn) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -78781,10 +78781,10 @@
.n(2)
.k(1)
.cn_stride(5)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, k_eq_1_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
@@ -78796,12 +78796,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, k_eq_1_subtile_m) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 3; m++) {
GemmMicrokernelTester()
.mr(3)
@@ -78812,11 +78812,11 @@
.n(2)
.k(1)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, k_eq_1_subtile_n) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
.mr(3)
@@ -78827,11 +78827,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, k_gt_1) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(3)
@@ -78841,11 +78841,11 @@
.m(3)
.n(2)
.k(k)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, k_gt_1_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -78858,13 +78858,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, n_gt_2) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, n_gt_2) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -78875,12 +78875,12 @@
.m(3)
.n(2)
.k(k)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, n_gt_2_strided_cn) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, n_gt_2_strided_cn) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -78892,12 +78892,12 @@
.n(2)
.k(k)
.cn_stride(5)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, n_gt_2_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, n_gt_2_subtile) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
@@ -78910,13 +78910,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, n_div_2) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, n_div_2) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -78927,12 +78927,12 @@
.m(3)
.n(2)
.k(k)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, n_div_2_strided_cn) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, n_div_2_strided_cn) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -78944,12 +78944,12 @@
.n(n)
.k(k)
.cn_stride(5)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, n_div_2_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, n_div_2_subtile) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
@@ -78962,13 +78962,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, small_kernel) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, small_kernel) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(3)
@@ -78979,11 +78979,11 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, small_kernel_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, small_kernel_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -78997,13 +78997,13 @@
.k(k)
.ks(3)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, n_gt_2_small_kernel) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, n_gt_2_small_kernel) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -79015,12 +79015,12 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, n_div_2_small_kernel) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, n_div_2_small_kernel) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -79032,12 +79032,12 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, strided_cm_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -79051,13 +79051,13 @@
.k(k)
.cm_stride(5)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, a_offset) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, a_offset) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(3)
@@ -79069,11 +79069,11 @@
.k(k)
.ks(3)
.a_offset(17)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, zero) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, zero) {
for (uint32_t mz = 0; mz < 3; mz++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -79087,12 +79087,12 @@
.ks(3)
.a_offset(17)
.zero_index(mz)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, qmin) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, qmin) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -79102,10 +79102,10 @@
.n(2)
.k(1)
.qmin(128)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, qmax) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, qmax) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -79115,10 +79115,10 @@
.n(2)
.k(1)
.qmax(128)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, strided_cm) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, strided_cm) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -79128,11 +79128,11 @@
.n(2)
.k(1)
.cm_stride(5)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, k_eq_1) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, k_eq_1) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -79141,10 +79141,10 @@
.m(4)
.n(2)
.k(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, strided_cn) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, strided_cn) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -79154,10 +79154,10 @@
.n(2)
.k(1)
.cn_stride(5)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, k_eq_1_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
@@ -79169,12 +79169,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, k_eq_1_subtile_m) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 4; m++) {
GemmMicrokernelTester()
.mr(4)
@@ -79185,11 +79185,11 @@
.n(2)
.k(1)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, k_eq_1_subtile_n) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
.mr(4)
@@ -79200,11 +79200,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, k_gt_1) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(4)
@@ -79214,11 +79214,11 @@
.m(4)
.n(2)
.k(k)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, k_gt_1_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -79231,13 +79231,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, n_gt_2) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, n_gt_2) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -79248,12 +79248,12 @@
.m(4)
.n(2)
.k(k)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, n_gt_2_strided_cn) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, n_gt_2_strided_cn) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -79265,12 +79265,12 @@
.n(2)
.k(k)
.cn_stride(5)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, n_gt_2_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, n_gt_2_subtile) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
@@ -79283,13 +79283,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, n_div_2) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, n_div_2) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -79300,12 +79300,12 @@
.m(4)
.n(2)
.k(k)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, n_div_2_strided_cn) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, n_div_2_strided_cn) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -79317,12 +79317,12 @@
.n(n)
.k(k)
.cn_stride(5)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, n_div_2_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, n_div_2_subtile) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
@@ -79335,13 +79335,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, small_kernel) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, small_kernel) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(4)
@@ -79352,11 +79352,11 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, small_kernel_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, small_kernel_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -79370,13 +79370,13 @@
.k(k)
.ks(3)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, n_gt_2_small_kernel) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, n_gt_2_small_kernel) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -79388,12 +79388,12 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, n_div_2_small_kernel) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, n_div_2_small_kernel) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -79405,12 +79405,12 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, strided_cm_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -79424,13 +79424,13 @@
.k(k)
.cm_stride(5)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, a_offset) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, a_offset) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(4)
@@ -79442,11 +79442,11 @@
.k(k)
.ks(3)
.a_offset(23)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, zero) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, zero) {
for (uint32_t mz = 0; mz < 4; mz++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -79460,12 +79460,12 @@
.ks(3)
.a_offset(23)
.zero_index(mz)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, qmin) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, qmin) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -79475,10 +79475,10 @@
.n(2)
.k(1)
.qmin(128)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, qmax) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, qmax) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -79488,10 +79488,10 @@
.n(2)
.k(1)
.qmax(128)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, strided_cm) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, strided_cm) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -79501,11 +79501,11 @@
.n(2)
.k(1)
.cm_stride(5)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, k_eq_1) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, k_eq_1) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -79514,10 +79514,10 @@
.m(1)
.n(4)
.k(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, strided_cn) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, strided_cn) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -79527,10 +79527,10 @@
.n(4)
.k(1)
.cn_stride(7)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, k_eq_1_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
@@ -79542,12 +79542,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, k_eq_1_subtile_m) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 1; m++) {
GemmMicrokernelTester()
.mr(1)
@@ -79558,11 +79558,11 @@
.n(4)
.k(1)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, k_eq_1_subtile_n) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
.mr(1)
@@ -79573,11 +79573,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, k_gt_1) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(1)
@@ -79587,11 +79587,11 @@
.m(1)
.n(4)
.k(k)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, k_gt_1_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -79604,13 +79604,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, n_gt_4) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, n_gt_4) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -79621,12 +79621,12 @@
.m(1)
.n(4)
.k(k)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, n_gt_4_strided_cn) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, n_gt_4_strided_cn) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -79638,12 +79638,12 @@
.n(4)
.k(k)
.cn_stride(7)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, n_gt_4_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, n_gt_4_subtile) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
@@ -79656,13 +79656,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, n_div_4) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, n_div_4) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -79673,12 +79673,12 @@
.m(1)
.n(4)
.k(k)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, n_div_4_strided_cn) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, n_div_4_strided_cn) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -79690,12 +79690,12 @@
.n(n)
.k(k)
.cn_stride(7)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, n_div_4_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, n_div_4_subtile) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
@@ -79708,13 +79708,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, small_kernel) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, small_kernel) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(1)
@@ -79725,11 +79725,11 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, small_kernel_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, small_kernel_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -79743,13 +79743,13 @@
.k(k)
.ks(3)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, n_gt_4_small_kernel) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, n_gt_4_small_kernel) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -79761,12 +79761,12 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, n_div_4_small_kernel) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, n_div_4_small_kernel) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -79778,12 +79778,12 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, strided_cm_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -79797,13 +79797,13 @@
.k(k)
.cm_stride(7)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, a_offset) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, a_offset) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(1)
@@ -79815,11 +79815,11 @@
.k(k)
.ks(3)
.a_offset(7)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, zero) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, zero) {
for (uint32_t mz = 0; mz < 1; mz++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -79833,12 +79833,12 @@
.ks(3)
.a_offset(7)
.zero_index(mz)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, qmin) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, qmin) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -79848,10 +79848,10 @@
.n(4)
.k(1)
.qmin(128)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, qmax) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, qmax) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -79861,10 +79861,10 @@
.n(4)
.k(1)
.qmax(128)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, strided_cm) {
+TEST(QC8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, strided_cm) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -79874,11 +79874,11 @@
.n(4)
.k(1)
.cm_stride(7)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, k_eq_1) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, k_eq_1) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -79887,10 +79887,10 @@
.m(2)
.n(4)
.k(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, strided_cn) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, strided_cn) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -79900,10 +79900,10 @@
.n(4)
.k(1)
.cn_stride(7)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, k_eq_1_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
@@ -79915,12 +79915,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, k_eq_1_subtile_m) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 2; m++) {
GemmMicrokernelTester()
.mr(2)
@@ -79931,11 +79931,11 @@
.n(4)
.k(1)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, k_eq_1_subtile_n) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
.mr(2)
@@ -79946,11 +79946,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, k_gt_1) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(2)
@@ -79960,11 +79960,11 @@
.m(2)
.n(4)
.k(k)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, k_gt_1_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -79977,13 +79977,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, n_gt_4) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, n_gt_4) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -79994,12 +79994,12 @@
.m(2)
.n(4)
.k(k)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, n_gt_4_strided_cn) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, n_gt_4_strided_cn) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -80011,12 +80011,12 @@
.n(4)
.k(k)
.cn_stride(7)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, n_gt_4_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, n_gt_4_subtile) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
@@ -80029,13 +80029,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, n_div_4) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, n_div_4) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -80046,12 +80046,12 @@
.m(2)
.n(4)
.k(k)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, n_div_4_strided_cn) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, n_div_4_strided_cn) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -80063,12 +80063,12 @@
.n(n)
.k(k)
.cn_stride(7)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, n_div_4_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, n_div_4_subtile) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
@@ -80081,13 +80081,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, small_kernel) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, small_kernel) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(2)
@@ -80098,11 +80098,11 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, small_kernel_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, small_kernel_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -80116,13 +80116,13 @@
.k(k)
.ks(3)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, n_gt_4_small_kernel) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, n_gt_4_small_kernel) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -80134,12 +80134,12 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, n_div_4_small_kernel) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, n_div_4_small_kernel) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -80151,12 +80151,12 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, strided_cm_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -80170,13 +80170,13 @@
.k(k)
.cm_stride(7)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, a_offset) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, a_offset) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(2)
@@ -80188,11 +80188,11 @@
.k(k)
.ks(3)
.a_offset(13)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, zero) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, zero) {
for (uint32_t mz = 0; mz < 2; mz++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -80206,12 +80206,12 @@
.ks(3)
.a_offset(13)
.zero_index(mz)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, qmin) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, qmin) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -80221,10 +80221,10 @@
.n(4)
.k(1)
.qmin(128)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, qmax) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, qmax) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -80234,10 +80234,10 @@
.n(4)
.k(1)
.qmax(128)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, strided_cm) {
+TEST(QC8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, strided_cm) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -80247,11 +80247,11 @@
.n(4)
.k(1)
.cm_stride(7)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, k_eq_1) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, k_eq_1) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -80260,10 +80260,10 @@
.m(3)
.n(4)
.k(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, strided_cn) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, strided_cn) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -80273,10 +80273,10 @@
.n(4)
.k(1)
.cn_stride(7)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, k_eq_1_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
@@ -80288,12 +80288,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, k_eq_1_subtile_m) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 3; m++) {
GemmMicrokernelTester()
.mr(3)
@@ -80304,11 +80304,11 @@
.n(4)
.k(1)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, k_eq_1_subtile_n) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
.mr(3)
@@ -80319,11 +80319,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, k_gt_1) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(3)
@@ -80333,11 +80333,11 @@
.m(3)
.n(4)
.k(k)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, k_gt_1_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -80350,13 +80350,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, n_gt_4) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, n_gt_4) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -80367,12 +80367,12 @@
.m(3)
.n(4)
.k(k)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, n_gt_4_strided_cn) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, n_gt_4_strided_cn) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -80384,12 +80384,12 @@
.n(4)
.k(k)
.cn_stride(7)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, n_gt_4_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, n_gt_4_subtile) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
@@ -80402,13 +80402,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, n_div_4) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, n_div_4) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -80419,12 +80419,12 @@
.m(3)
.n(4)
.k(k)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, n_div_4_strided_cn) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, n_div_4_strided_cn) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -80436,12 +80436,12 @@
.n(n)
.k(k)
.cn_stride(7)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, n_div_4_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, n_div_4_subtile) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
@@ -80454,13 +80454,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, small_kernel) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, small_kernel) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(3)
@@ -80471,11 +80471,11 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, small_kernel_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, small_kernel_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -80489,13 +80489,13 @@
.k(k)
.ks(3)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, n_gt_4_small_kernel) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, n_gt_4_small_kernel) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -80507,12 +80507,12 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, n_div_4_small_kernel) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, n_div_4_small_kernel) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -80524,12 +80524,12 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, strided_cm_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -80543,13 +80543,13 @@
.k(k)
.cm_stride(7)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, a_offset) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, a_offset) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(3)
@@ -80561,11 +80561,11 @@
.k(k)
.ks(3)
.a_offset(17)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, zero) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, zero) {
for (uint32_t mz = 0; mz < 3; mz++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -80579,12 +80579,12 @@
.ks(3)
.a_offset(17)
.zero_index(mz)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, qmin) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, qmin) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -80594,10 +80594,10 @@
.n(4)
.k(1)
.qmin(128)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, qmax) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, qmax) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -80607,10 +80607,10 @@
.n(4)
.k(1)
.qmax(128)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, strided_cm) {
+TEST(QC8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, strided_cm) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -80620,11 +80620,11 @@
.n(4)
.k(1)
.cm_stride(7)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, k_eq_1) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, k_eq_1) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -80633,10 +80633,10 @@
.m(4)
.n(4)
.k(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, strided_cn) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, strided_cn) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -80646,10 +80646,10 @@
.n(4)
.k(1)
.cn_stride(7)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, k_eq_1_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
@@ -80661,12 +80661,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, k_eq_1_subtile_m) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 4; m++) {
GemmMicrokernelTester()
.mr(4)
@@ -80677,11 +80677,11 @@
.n(4)
.k(1)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, k_eq_1_subtile_n) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
.mr(4)
@@ -80692,11 +80692,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, k_gt_1) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(4)
@@ -80706,11 +80706,11 @@
.m(4)
.n(4)
.k(k)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, k_gt_1_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -80723,13 +80723,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, n_gt_4) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, n_gt_4) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -80740,12 +80740,12 @@
.m(4)
.n(4)
.k(k)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, n_gt_4_strided_cn) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, n_gt_4_strided_cn) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -80757,12 +80757,12 @@
.n(4)
.k(k)
.cn_stride(7)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, n_gt_4_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, n_gt_4_subtile) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
@@ -80775,13 +80775,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, n_div_4) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, n_div_4) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -80792,12 +80792,12 @@
.m(4)
.n(4)
.k(k)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, n_div_4_strided_cn) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, n_div_4_strided_cn) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -80809,12 +80809,12 @@
.n(n)
.k(k)
.cn_stride(7)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, n_div_4_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, n_div_4_subtile) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
@@ -80827,13 +80827,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, small_kernel) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, small_kernel) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(4)
@@ -80844,11 +80844,11 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, small_kernel_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, small_kernel_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -80862,13 +80862,13 @@
.k(k)
.ks(3)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, n_gt_4_small_kernel) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, n_gt_4_small_kernel) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -80880,12 +80880,12 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, n_div_4_small_kernel) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, n_div_4_small_kernel) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -80897,12 +80897,12 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, strided_cm_subtile) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -80916,13 +80916,13 @@
.k(k)
.cm_stride(7)
.iterations(1)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, a_offset) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, a_offset) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(4)
@@ -80934,11 +80934,11 @@
.k(k)
.ks(3)
.a_offset(23)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, zero) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, zero) {
for (uint32_t mz = 0; mz < 4; mz++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -80952,12 +80952,12 @@
.ks(3)
.a_offset(23)
.zero_index(mz)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, qmin) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, qmin) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -80967,10 +80967,10 @@
.n(4)
.k(1)
.qmin(128)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, qmax) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, qmax) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -80980,10 +80980,10 @@
.n(4)
.k(1)
.qmax(128)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, strided_cm) {
+TEST(QC8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, strided_cm) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -80993,5 +80993,5 @@
.n(4)
.k(1)
.cm_stride(7)
- .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_minmax_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_minmax_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
diff --git a/test/qc8-igemm-minmax-fp32.yaml b/test/qc8-igemm-minmax-fp32.yaml
index edaf426..6c1861e 100644
--- a/test/qc8-igemm-minmax-fp32.yaml
+++ b/test/qc8-igemm-minmax-fp32.yaml
@@ -506,51 +506,51 @@
- name: xnn_qc8_igemm_minmax_fp32_ukernel_3x4c8__wasmsimd_mul16_ld128
init: xnn_init_qs8_minmax_wasmsimd_params
k-block: 8
-- name: xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint
- init: xnn_init_qs8_minmax_scalar_lrint_params
+- name: xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf
+ init: xnn_init_qs8_minmax_scalar_lrintf_params
k-block: 1
-- name: xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint
- init: xnn_init_qs8_minmax_scalar_lrint_params
+- name: xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf
+ init: xnn_init_qs8_minmax_scalar_lrintf_params
k-block: 1
-- name: xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint
- init: xnn_init_qs8_minmax_scalar_lrint_params
+- name: xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf
+ init: xnn_init_qs8_minmax_scalar_lrintf_params
k-block: 1
-- name: xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint
- init: xnn_init_qs8_minmax_scalar_lrint_params
+- name: xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf
+ init: xnn_init_qs8_minmax_scalar_lrintf_params
k-block: 1
-- name: xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint
- init: xnn_init_qs8_minmax_scalar_lrint_params
+- name: xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf
+ init: xnn_init_qs8_minmax_scalar_lrintf_params
k-block: 1
-- name: xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint
- init: xnn_init_qs8_minmax_scalar_lrint_params
+- name: xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf
+ init: xnn_init_qs8_minmax_scalar_lrintf_params
k-block: 1
-- name: xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint
- init: xnn_init_qs8_minmax_scalar_lrint_params
+- name: xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf
+ init: xnn_init_qs8_minmax_scalar_lrintf_params
k-block: 1
-- name: xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint
- init: xnn_init_qs8_minmax_scalar_lrint_params
+- name: xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf
+ init: xnn_init_qs8_minmax_scalar_lrintf_params
k-block: 1
-- name: xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_magic
- init: xnn_init_qs8_minmax_scalar_magic_params
+- name: xnn_qc8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic
+ init: xnn_init_qs8_minmax_scalar_fmagic_params
k-block: 1
-- name: xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_magic
- init: xnn_init_qs8_minmax_scalar_magic_params
+- name: xnn_qc8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic
+ init: xnn_init_qs8_minmax_scalar_fmagic_params
k-block: 1
-- name: xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_magic
- init: xnn_init_qs8_minmax_scalar_magic_params
+- name: xnn_qc8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic
+ init: xnn_init_qs8_minmax_scalar_fmagic_params
k-block: 1
-- name: xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_magic
- init: xnn_init_qs8_minmax_scalar_magic_params
+- name: xnn_qc8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic
+ init: xnn_init_qs8_minmax_scalar_fmagic_params
k-block: 1
-- name: xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_magic
- init: xnn_init_qs8_minmax_scalar_magic_params
+- name: xnn_qc8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic
+ init: xnn_init_qs8_minmax_scalar_fmagic_params
k-block: 1
-- name: xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_magic
- init: xnn_init_qs8_minmax_scalar_magic_params
+- name: xnn_qc8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic
+ init: xnn_init_qs8_minmax_scalar_fmagic_params
k-block: 1
-- name: xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_magic
- init: xnn_init_qs8_minmax_scalar_magic_params
+- name: xnn_qc8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic
+ init: xnn_init_qs8_minmax_scalar_fmagic_params
k-block: 1
-- name: xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_magic
- init: xnn_init_qs8_minmax_scalar_magic_params
+- name: xnn_qc8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic
+ init: xnn_init_qs8_minmax_scalar_fmagic_params
k-block: 1
diff --git a/test/qs8-dwconv-minmax-fp32.cc b/test/qs8-dwconv-minmax-fp32.cc
index a7307fb..cc067d6 100644
--- a/test/qs8-dwconv-minmax-fp32.cc
+++ b/test/qs8-dwconv-minmax-fp32.cc
@@ -9602,58 +9602,58 @@
#endif // XNN_ARCH_WASMSIMD || XNN_ARCH_WASMRELAXEDSIMD
-TEST(QS8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINT, c_eq_1) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINTF, c_eq_1) {
DWConvMicrokernelTester()
.cr(1)
.kr(9)
.channels(1)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINT, c_gt_1) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINTF, c_gt_1) {
for (uint32_t channels = 2; channels < 10; channels++) {
DWConvMicrokernelTester()
.cr(1)
.kr(9)
.channels(channels)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINT, c_gt_1_with_qmin) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINTF, c_gt_1_with_qmin) {
for (uint32_t channels = 2; channels < 10; channels++) {
DWConvMicrokernelTester()
.cr(1)
.kr(9)
.channels(channels)
.qmin(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINT, c_gt_1_with_qmax) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINTF, c_gt_1_with_qmax) {
for (uint32_t channels = 2; channels < 10; channels++) {
DWConvMicrokernelTester()
.cr(1)
.kr(9)
.channels(channels)
.qmax(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINT, multipixel) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINTF, multipixel) {
for (size_t channels = 1; channels <= 5; channels += 1) {
DWConvMicrokernelTester()
.cr(1)
.kr(9)
.channels(channels)
.width(3)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINT, multipixel_with_step) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINTF, multipixel_with_step) {
for (size_t channels = 1; channels <= 5; channels += 1) {
for (size_t step = 2; step <= 9; step++) {
DWConvMicrokernelTester()
@@ -9662,12 +9662,12 @@
.channels(channels)
.width(3)
.step(step)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINT, multipixel_with_output_stride) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINTF, multipixel_with_output_stride) {
for (size_t channels = 1; channels <= 5; channels += 1) {
DWConvMicrokernelTester()
.cr(1)
@@ -9675,11 +9675,11 @@
.channels(1)
.width(5)
.output_stride(7)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINT, multipixel_with_qmin) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINTF, multipixel_with_qmin) {
for (size_t channels = 1; channels <= 5; channels += 1) {
DWConvMicrokernelTester()
.cr(1)
@@ -9687,11 +9687,11 @@
.channels(channels)
.width(3)
.qmin(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINT, multipixel_with_qmax) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINTF, multipixel_with_qmax) {
for (size_t channels = 1; channels <= 5; channels += 1) {
DWConvMicrokernelTester()
.cr(1)
@@ -9699,22 +9699,22 @@
.channels(channels)
.width(3)
.qmax(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINT, input_offset) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINTF, input_offset) {
for (uint32_t channels = 2; channels < 16; channels += 3) {
DWConvMicrokernelTester()
.cr(1)
.kr(9)
.channels(channels)
.input_offset(48)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINT, zero) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINTF, zero) {
for (uint32_t mz = 0; mz < 9; mz++) {
for (uint32_t channels = 2; channels < 16; channels += 3) {
DWConvMicrokernelTester()
@@ -9723,105 +9723,105 @@
.channels(channels)
.input_offset(48)
.zero_index(mz)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINT, c_eq_2) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINTF, c_eq_2) {
DWConvMicrokernelTester()
.cr(2)
.kr(9)
.channels(2)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINT, c_div_2) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINTF, c_div_2) {
for (uint32_t channels = 4; channels < 32; channels += 6) {
DWConvMicrokernelTester()
.cr(2)
.kr(9)
.channels(channels)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINT, c_div_2_with_qmin) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINTF, c_div_2_with_qmin) {
for (uint32_t channels = 4; channels < 32; channels += 6) {
DWConvMicrokernelTester()
.cr(2)
.kr(9)
.channels(channels)
.qmin(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINT, c_div_2_with_qmax) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINTF, c_div_2_with_qmax) {
for (uint32_t channels = 4; channels < 32; channels += 6) {
DWConvMicrokernelTester()
.cr(2)
.kr(9)
.channels(channels)
.qmax(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINT, c_lt_2) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINTF, c_lt_2) {
for (uint32_t channels = 1; channels < 2; channels++) {
DWConvMicrokernelTester()
.cr(2)
.kr(9)
.channels(channels)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINT, c_gt_2) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINTF, c_gt_2) {
for (uint32_t channels = 3; channels < 4; channels++) {
DWConvMicrokernelTester()
.cr(2)
.kr(9)
.channels(channels)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINT, c_gt_2_with_qmin) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINTF, c_gt_2_with_qmin) {
for (uint32_t channels = 3; channels < 4; channels++) {
DWConvMicrokernelTester()
.cr(2)
.kr(9)
.channels(channels)
.qmin(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINT, c_gt_2_with_qmax) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINTF, c_gt_2_with_qmax) {
for (uint32_t channels = 3; channels < 4; channels++) {
DWConvMicrokernelTester()
.cr(2)
.kr(9)
.channels(channels)
.qmax(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINT, multipixel) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINTF, multipixel) {
for (size_t channels = 1; channels <= 10; channels += 1) {
DWConvMicrokernelTester()
.cr(2)
.kr(9)
.channels(channels)
.width(3)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINT, multipixel_with_step) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINTF, multipixel_with_step) {
for (size_t channels = 1; channels <= 10; channels += 1) {
for (size_t step = 2; step <= 9; step++) {
DWConvMicrokernelTester()
@@ -9830,12 +9830,12 @@
.channels(channels)
.width(3)
.step(step)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINT, multipixel_with_output_stride) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINTF, multipixel_with_output_stride) {
for (size_t channels = 1; channels <= 10; channels += 1) {
DWConvMicrokernelTester()
.cr(2)
@@ -9843,11 +9843,11 @@
.channels(2)
.width(5)
.output_stride(13)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINT, multipixel_with_qmin) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINTF, multipixel_with_qmin) {
for (size_t channels = 1; channels <= 10; channels += 1) {
DWConvMicrokernelTester()
.cr(2)
@@ -9855,11 +9855,11 @@
.channels(channels)
.width(3)
.qmin(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINT, multipixel_with_qmax) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINTF, multipixel_with_qmax) {
for (size_t channels = 1; channels <= 10; channels += 1) {
DWConvMicrokernelTester()
.cr(2)
@@ -9867,22 +9867,22 @@
.channels(channels)
.width(3)
.qmax(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINT, input_offset) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINTF, input_offset) {
for (uint32_t channels = 4; channels < 32; channels += 6) {
DWConvMicrokernelTester()
.cr(2)
.kr(9)
.channels(channels)
.input_offset(80)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINT, zero) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINTF, zero) {
for (uint32_t mz = 0; mz < 9; mz++) {
for (uint32_t channels = 4; channels < 32; channels += 6) {
DWConvMicrokernelTester()
@@ -9891,105 +9891,105 @@
.channels(channels)
.input_offset(80)
.zero_index(mz)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINT, c_eq_4) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINTF, c_eq_4) {
DWConvMicrokernelTester()
.cr(4)
.kr(9)
.channels(4)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINT, c_div_4) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINTF, c_div_4) {
for (uint32_t channels = 8; channels < 64; channels += 12) {
DWConvMicrokernelTester()
.cr(4)
.kr(9)
.channels(channels)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINT, c_div_4_with_qmin) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINTF, c_div_4_with_qmin) {
for (uint32_t channels = 8; channels < 64; channels += 12) {
DWConvMicrokernelTester()
.cr(4)
.kr(9)
.channels(channels)
.qmin(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINT, c_div_4_with_qmax) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINTF, c_div_4_with_qmax) {
for (uint32_t channels = 8; channels < 64; channels += 12) {
DWConvMicrokernelTester()
.cr(4)
.kr(9)
.channels(channels)
.qmax(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINT, c_lt_4) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINTF, c_lt_4) {
for (uint32_t channels = 1; channels < 4; channels++) {
DWConvMicrokernelTester()
.cr(4)
.kr(9)
.channels(channels)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINT, c_gt_4) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINTF, c_gt_4) {
for (uint32_t channels = 5; channels < 8; channels++) {
DWConvMicrokernelTester()
.cr(4)
.kr(9)
.channels(channels)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINT, c_gt_4_with_qmin) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINTF, c_gt_4_with_qmin) {
for (uint32_t channels = 5; channels < 8; channels++) {
DWConvMicrokernelTester()
.cr(4)
.kr(9)
.channels(channels)
.qmin(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINT, c_gt_4_with_qmax) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINTF, c_gt_4_with_qmax) {
for (uint32_t channels = 5; channels < 8; channels++) {
DWConvMicrokernelTester()
.cr(4)
.kr(9)
.channels(channels)
.qmax(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINT, multipixel) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINTF, multipixel) {
for (size_t channels = 1; channels <= 20; channels += 3) {
DWConvMicrokernelTester()
.cr(4)
.kr(9)
.channels(channels)
.width(3)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINT, multipixel_with_step) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINTF, multipixel_with_step) {
for (size_t channels = 1; channels <= 20; channels += 3) {
for (size_t step = 2; step <= 9; step++) {
DWConvMicrokernelTester()
@@ -9998,12 +9998,12 @@
.channels(channels)
.width(3)
.step(step)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINT, multipixel_with_output_stride) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINTF, multipixel_with_output_stride) {
for (size_t channels = 1; channels <= 20; channels += 3) {
DWConvMicrokernelTester()
.cr(4)
@@ -10011,11 +10011,11 @@
.channels(4)
.width(5)
.output_stride(23)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINT, multipixel_with_qmin) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINTF, multipixel_with_qmin) {
for (size_t channels = 1; channels <= 20; channels += 3) {
DWConvMicrokernelTester()
.cr(4)
@@ -10023,11 +10023,11 @@
.channels(channels)
.width(3)
.qmin(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINT, multipixel_with_qmax) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINTF, multipixel_with_qmax) {
for (size_t channels = 1; channels <= 20; channels += 3) {
DWConvMicrokernelTester()
.cr(4)
@@ -10035,22 +10035,22 @@
.channels(channels)
.width(3)
.qmax(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINT, input_offset) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINTF, input_offset) {
for (uint32_t channels = 8; channels < 64; channels += 12) {
DWConvMicrokernelTester()
.cr(4)
.kr(9)
.channels(channels)
.input_offset(112)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINT, zero) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINTF, zero) {
for (uint32_t mz = 0; mz < 9; mz++) {
for (uint32_t channels = 8; channels < 64; channels += 12) {
DWConvMicrokernelTester()
@@ -10059,63 +10059,63 @@
.channels(channels)
.input_offset(112)
.zero_index(mz)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_MAGIC, c_eq_1) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_FMAGIC, c_eq_1) {
DWConvMicrokernelTester()
.cr(1)
.kr(9)
.channels(1)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_MAGIC, c_gt_1) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_FMAGIC, c_gt_1) {
for (uint32_t channels = 2; channels < 10; channels++) {
DWConvMicrokernelTester()
.cr(1)
.kr(9)
.channels(channels)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_MAGIC, c_gt_1_with_qmin) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_FMAGIC, c_gt_1_with_qmin) {
for (uint32_t channels = 2; channels < 10; channels++) {
DWConvMicrokernelTester()
.cr(1)
.kr(9)
.channels(channels)
.qmin(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_MAGIC, c_gt_1_with_qmax) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_FMAGIC, c_gt_1_with_qmax) {
for (uint32_t channels = 2; channels < 10; channels++) {
DWConvMicrokernelTester()
.cr(1)
.kr(9)
.channels(channels)
.qmax(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_MAGIC, multipixel) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_FMAGIC, multipixel) {
for (size_t channels = 1; channels <= 5; channels += 1) {
DWConvMicrokernelTester()
.cr(1)
.kr(9)
.channels(channels)
.width(3)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_MAGIC, multipixel_with_step) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_FMAGIC, multipixel_with_step) {
for (size_t channels = 1; channels <= 5; channels += 1) {
for (size_t step = 2; step <= 9; step++) {
DWConvMicrokernelTester()
@@ -10124,12 +10124,12 @@
.channels(channels)
.width(3)
.step(step)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_MAGIC, multipixel_with_output_stride) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_FMAGIC, multipixel_with_output_stride) {
for (size_t channels = 1; channels <= 5; channels += 1) {
DWConvMicrokernelTester()
.cr(1)
@@ -10137,11 +10137,11 @@
.channels(1)
.width(5)
.output_stride(7)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_MAGIC, multipixel_with_qmin) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_FMAGIC, multipixel_with_qmin) {
for (size_t channels = 1; channels <= 5; channels += 1) {
DWConvMicrokernelTester()
.cr(1)
@@ -10149,11 +10149,11 @@
.channels(channels)
.width(3)
.qmin(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_MAGIC, multipixel_with_qmax) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_FMAGIC, multipixel_with_qmax) {
for (size_t channels = 1; channels <= 5; channels += 1) {
DWConvMicrokernelTester()
.cr(1)
@@ -10161,22 +10161,22 @@
.channels(channels)
.width(3)
.qmax(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_MAGIC, input_offset) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_FMAGIC, input_offset) {
for (uint32_t channels = 2; channels < 16; channels += 3) {
DWConvMicrokernelTester()
.cr(1)
.kr(9)
.channels(channels)
.input_offset(48)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_MAGIC, zero) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_FMAGIC, zero) {
for (uint32_t mz = 0; mz < 9; mz++) {
for (uint32_t channels = 2; channels < 16; channels += 3) {
DWConvMicrokernelTester()
@@ -10185,105 +10185,105 @@
.channels(channels)
.input_offset(48)
.zero_index(mz)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_MAGIC, c_eq_2) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_FMAGIC, c_eq_2) {
DWConvMicrokernelTester()
.cr(2)
.kr(9)
.channels(2)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_MAGIC, c_div_2) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_FMAGIC, c_div_2) {
for (uint32_t channels = 4; channels < 32; channels += 6) {
DWConvMicrokernelTester()
.cr(2)
.kr(9)
.channels(channels)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_MAGIC, c_div_2_with_qmin) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_FMAGIC, c_div_2_with_qmin) {
for (uint32_t channels = 4; channels < 32; channels += 6) {
DWConvMicrokernelTester()
.cr(2)
.kr(9)
.channels(channels)
.qmin(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_MAGIC, c_div_2_with_qmax) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_FMAGIC, c_div_2_with_qmax) {
for (uint32_t channels = 4; channels < 32; channels += 6) {
DWConvMicrokernelTester()
.cr(2)
.kr(9)
.channels(channels)
.qmax(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_MAGIC, c_lt_2) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_FMAGIC, c_lt_2) {
for (uint32_t channels = 1; channels < 2; channels++) {
DWConvMicrokernelTester()
.cr(2)
.kr(9)
.channels(channels)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_MAGIC, c_gt_2) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_FMAGIC, c_gt_2) {
for (uint32_t channels = 3; channels < 4; channels++) {
DWConvMicrokernelTester()
.cr(2)
.kr(9)
.channels(channels)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_MAGIC, c_gt_2_with_qmin) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_FMAGIC, c_gt_2_with_qmin) {
for (uint32_t channels = 3; channels < 4; channels++) {
DWConvMicrokernelTester()
.cr(2)
.kr(9)
.channels(channels)
.qmin(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_MAGIC, c_gt_2_with_qmax) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_FMAGIC, c_gt_2_with_qmax) {
for (uint32_t channels = 3; channels < 4; channels++) {
DWConvMicrokernelTester()
.cr(2)
.kr(9)
.channels(channels)
.qmax(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_MAGIC, multipixel) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_FMAGIC, multipixel) {
for (size_t channels = 1; channels <= 10; channels += 1) {
DWConvMicrokernelTester()
.cr(2)
.kr(9)
.channels(channels)
.width(3)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_MAGIC, multipixel_with_step) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_FMAGIC, multipixel_with_step) {
for (size_t channels = 1; channels <= 10; channels += 1) {
for (size_t step = 2; step <= 9; step++) {
DWConvMicrokernelTester()
@@ -10292,12 +10292,12 @@
.channels(channels)
.width(3)
.step(step)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_MAGIC, multipixel_with_output_stride) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_FMAGIC, multipixel_with_output_stride) {
for (size_t channels = 1; channels <= 10; channels += 1) {
DWConvMicrokernelTester()
.cr(2)
@@ -10305,11 +10305,11 @@
.channels(2)
.width(5)
.output_stride(13)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_MAGIC, multipixel_with_qmin) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_FMAGIC, multipixel_with_qmin) {
for (size_t channels = 1; channels <= 10; channels += 1) {
DWConvMicrokernelTester()
.cr(2)
@@ -10317,11 +10317,11 @@
.channels(channels)
.width(3)
.qmin(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_MAGIC, multipixel_with_qmax) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_FMAGIC, multipixel_with_qmax) {
for (size_t channels = 1; channels <= 10; channels += 1) {
DWConvMicrokernelTester()
.cr(2)
@@ -10329,22 +10329,22 @@
.channels(channels)
.width(3)
.qmax(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_MAGIC, input_offset) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_FMAGIC, input_offset) {
for (uint32_t channels = 4; channels < 32; channels += 6) {
DWConvMicrokernelTester()
.cr(2)
.kr(9)
.channels(channels)
.input_offset(80)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_MAGIC, zero) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_FMAGIC, zero) {
for (uint32_t mz = 0; mz < 9; mz++) {
for (uint32_t channels = 4; channels < 32; channels += 6) {
DWConvMicrokernelTester()
@@ -10353,105 +10353,105 @@
.channels(channels)
.input_offset(80)
.zero_index(mz)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_MAGIC, c_eq_4) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_FMAGIC, c_eq_4) {
DWConvMicrokernelTester()
.cr(4)
.kr(9)
.channels(4)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_MAGIC, c_div_4) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_FMAGIC, c_div_4) {
for (uint32_t channels = 8; channels < 64; channels += 12) {
DWConvMicrokernelTester()
.cr(4)
.kr(9)
.channels(channels)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_MAGIC, c_div_4_with_qmin) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_FMAGIC, c_div_4_with_qmin) {
for (uint32_t channels = 8; channels < 64; channels += 12) {
DWConvMicrokernelTester()
.cr(4)
.kr(9)
.channels(channels)
.qmin(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_MAGIC, c_div_4_with_qmax) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_FMAGIC, c_div_4_with_qmax) {
for (uint32_t channels = 8; channels < 64; channels += 12) {
DWConvMicrokernelTester()
.cr(4)
.kr(9)
.channels(channels)
.qmax(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_MAGIC, c_lt_4) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_FMAGIC, c_lt_4) {
for (uint32_t channels = 1; channels < 4; channels++) {
DWConvMicrokernelTester()
.cr(4)
.kr(9)
.channels(channels)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_MAGIC, c_gt_4) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_FMAGIC, c_gt_4) {
for (uint32_t channels = 5; channels < 8; channels++) {
DWConvMicrokernelTester()
.cr(4)
.kr(9)
.channels(channels)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_MAGIC, c_gt_4_with_qmin) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_FMAGIC, c_gt_4_with_qmin) {
for (uint32_t channels = 5; channels < 8; channels++) {
DWConvMicrokernelTester()
.cr(4)
.kr(9)
.channels(channels)
.qmin(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_MAGIC, c_gt_4_with_qmax) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_FMAGIC, c_gt_4_with_qmax) {
for (uint32_t channels = 5; channels < 8; channels++) {
DWConvMicrokernelTester()
.cr(4)
.kr(9)
.channels(channels)
.qmax(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_MAGIC, multipixel) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_FMAGIC, multipixel) {
for (size_t channels = 1; channels <= 20; channels += 3) {
DWConvMicrokernelTester()
.cr(4)
.kr(9)
.channels(channels)
.width(3)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_MAGIC, multipixel_with_step) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_FMAGIC, multipixel_with_step) {
for (size_t channels = 1; channels <= 20; channels += 3) {
for (size_t step = 2; step <= 9; step++) {
DWConvMicrokernelTester()
@@ -10460,12 +10460,12 @@
.channels(channels)
.width(3)
.step(step)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_MAGIC, multipixel_with_output_stride) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_FMAGIC, multipixel_with_output_stride) {
for (size_t channels = 1; channels <= 20; channels += 3) {
DWConvMicrokernelTester()
.cr(4)
@@ -10473,11 +10473,11 @@
.channels(4)
.width(5)
.output_stride(23)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_MAGIC, multipixel_with_qmin) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_FMAGIC, multipixel_with_qmin) {
for (size_t channels = 1; channels <= 20; channels += 3) {
DWConvMicrokernelTester()
.cr(4)
@@ -10485,11 +10485,11 @@
.channels(channels)
.width(3)
.qmin(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_MAGIC, multipixel_with_qmax) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_FMAGIC, multipixel_with_qmax) {
for (size_t channels = 1; channels <= 20; channels += 3) {
DWConvMicrokernelTester()
.cr(4)
@@ -10497,22 +10497,22 @@
.channels(channels)
.width(3)
.qmax(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_MAGIC, input_offset) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_FMAGIC, input_offset) {
for (uint32_t channels = 8; channels < 64; channels += 12) {
DWConvMicrokernelTester()
.cr(4)
.kr(9)
.channels(channels)
.input_offset(112)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_MAGIC, zero) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_FMAGIC, zero) {
for (uint32_t mz = 0; mz < 9; mz++) {
for (uint32_t channels = 8; channels < 64; channels += 12) {
DWConvMicrokernelTester()
@@ -10521,7 +10521,7 @@
.channels(channels)
.input_offset(112)
.zero_index(mz)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
@@ -20108,58 +20108,58 @@
#endif // XNN_ARCH_WASMSIMD || XNN_ARCH_WASMRELAXEDSIMD
-TEST(QS8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINT, c_eq_1) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINTF, c_eq_1) {
DWConvMicrokernelTester()
.cr(1)
.kr(25)
.channels(1)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINT, c_gt_1) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINTF, c_gt_1) {
for (uint32_t channels = 2; channels < 10; channels++) {
DWConvMicrokernelTester()
.cr(1)
.kr(25)
.channels(channels)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINT, c_gt_1_with_qmin) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINTF, c_gt_1_with_qmin) {
for (uint32_t channels = 2; channels < 10; channels++) {
DWConvMicrokernelTester()
.cr(1)
.kr(25)
.channels(channels)
.qmin(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINT, c_gt_1_with_qmax) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINTF, c_gt_1_with_qmax) {
for (uint32_t channels = 2; channels < 10; channels++) {
DWConvMicrokernelTester()
.cr(1)
.kr(25)
.channels(channels)
.qmax(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINT, multipixel) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINTF, multipixel) {
for (size_t channels = 1; channels <= 5; channels += 1) {
DWConvMicrokernelTester()
.cr(1)
.kr(25)
.channels(channels)
.width(3)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINT, multipixel_with_step) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINTF, multipixel_with_step) {
for (size_t channels = 1; channels <= 5; channels += 1) {
for (size_t step = 2; step <= 25; step++) {
DWConvMicrokernelTester()
@@ -20168,12 +20168,12 @@
.channels(channels)
.width(3)
.step(step)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINT, multipixel_with_output_stride) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINTF, multipixel_with_output_stride) {
for (size_t channels = 1; channels <= 5; channels += 1) {
DWConvMicrokernelTester()
.cr(1)
@@ -20181,11 +20181,11 @@
.channels(1)
.width(5)
.output_stride(7)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINT, multipixel_with_qmin) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINTF, multipixel_with_qmin) {
for (size_t channels = 1; channels <= 5; channels += 1) {
DWConvMicrokernelTester()
.cr(1)
@@ -20193,11 +20193,11 @@
.channels(channels)
.width(3)
.qmin(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINT, multipixel_with_qmax) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINTF, multipixel_with_qmax) {
for (size_t channels = 1; channels <= 5; channels += 1) {
DWConvMicrokernelTester()
.cr(1)
@@ -20205,22 +20205,22 @@
.channels(channels)
.width(3)
.qmax(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINT, input_offset) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINTF, input_offset) {
for (uint32_t channels = 2; channels < 16; channels += 3) {
DWConvMicrokernelTester()
.cr(1)
.kr(25)
.channels(channels)
.input_offset(48)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINT, zero) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINTF, zero) {
for (uint32_t mz = 0; mz < 25; mz++) {
for (uint32_t channels = 2; channels < 16; channels += 3) {
DWConvMicrokernelTester()
@@ -20229,105 +20229,105 @@
.channels(channels)
.input_offset(48)
.zero_index(mz)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINT, c_eq_2) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINTF, c_eq_2) {
DWConvMicrokernelTester()
.cr(2)
.kr(25)
.channels(2)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINT, c_div_2) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINTF, c_div_2) {
for (uint32_t channels = 4; channels < 32; channels += 6) {
DWConvMicrokernelTester()
.cr(2)
.kr(25)
.channels(channels)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINT, c_div_2_with_qmin) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINTF, c_div_2_with_qmin) {
for (uint32_t channels = 4; channels < 32; channels += 6) {
DWConvMicrokernelTester()
.cr(2)
.kr(25)
.channels(channels)
.qmin(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINT, c_div_2_with_qmax) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINTF, c_div_2_with_qmax) {
for (uint32_t channels = 4; channels < 32; channels += 6) {
DWConvMicrokernelTester()
.cr(2)
.kr(25)
.channels(channels)
.qmax(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINT, c_lt_2) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINTF, c_lt_2) {
for (uint32_t channels = 1; channels < 2; channels++) {
DWConvMicrokernelTester()
.cr(2)
.kr(25)
.channels(channels)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINT, c_gt_2) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINTF, c_gt_2) {
for (uint32_t channels = 3; channels < 4; channels++) {
DWConvMicrokernelTester()
.cr(2)
.kr(25)
.channels(channels)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINT, c_gt_2_with_qmin) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINTF, c_gt_2_with_qmin) {
for (uint32_t channels = 3; channels < 4; channels++) {
DWConvMicrokernelTester()
.cr(2)
.kr(25)
.channels(channels)
.qmin(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINT, c_gt_2_with_qmax) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINTF, c_gt_2_with_qmax) {
for (uint32_t channels = 3; channels < 4; channels++) {
DWConvMicrokernelTester()
.cr(2)
.kr(25)
.channels(channels)
.qmax(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINT, multipixel) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINTF, multipixel) {
for (size_t channels = 1; channels <= 10; channels += 1) {
DWConvMicrokernelTester()
.cr(2)
.kr(25)
.channels(channels)
.width(3)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINT, multipixel_with_step) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINTF, multipixel_with_step) {
for (size_t channels = 1; channels <= 10; channels += 1) {
for (size_t step = 2; step <= 25; step++) {
DWConvMicrokernelTester()
@@ -20336,12 +20336,12 @@
.channels(channels)
.width(3)
.step(step)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINT, multipixel_with_output_stride) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINTF, multipixel_with_output_stride) {
for (size_t channels = 1; channels <= 10; channels += 1) {
DWConvMicrokernelTester()
.cr(2)
@@ -20349,11 +20349,11 @@
.channels(2)
.width(5)
.output_stride(13)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINT, multipixel_with_qmin) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINTF, multipixel_with_qmin) {
for (size_t channels = 1; channels <= 10; channels += 1) {
DWConvMicrokernelTester()
.cr(2)
@@ -20361,11 +20361,11 @@
.channels(channels)
.width(3)
.qmin(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINT, multipixel_with_qmax) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINTF, multipixel_with_qmax) {
for (size_t channels = 1; channels <= 10; channels += 1) {
DWConvMicrokernelTester()
.cr(2)
@@ -20373,22 +20373,22 @@
.channels(channels)
.width(3)
.qmax(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINT, input_offset) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINTF, input_offset) {
for (uint32_t channels = 4; channels < 32; channels += 6) {
DWConvMicrokernelTester()
.cr(2)
.kr(25)
.channels(channels)
.input_offset(80)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINT, zero) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINTF, zero) {
for (uint32_t mz = 0; mz < 25; mz++) {
for (uint32_t channels = 4; channels < 32; channels += 6) {
DWConvMicrokernelTester()
@@ -20397,105 +20397,105 @@
.channels(channels)
.input_offset(80)
.zero_index(mz)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINT, c_eq_4) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINTF, c_eq_4) {
DWConvMicrokernelTester()
.cr(4)
.kr(25)
.channels(4)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINT, c_div_4) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINTF, c_div_4) {
for (uint32_t channels = 8; channels < 64; channels += 12) {
DWConvMicrokernelTester()
.cr(4)
.kr(25)
.channels(channels)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINT, c_div_4_with_qmin) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINTF, c_div_4_with_qmin) {
for (uint32_t channels = 8; channels < 64; channels += 12) {
DWConvMicrokernelTester()
.cr(4)
.kr(25)
.channels(channels)
.qmin(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINT, c_div_4_with_qmax) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINTF, c_div_4_with_qmax) {
for (uint32_t channels = 8; channels < 64; channels += 12) {
DWConvMicrokernelTester()
.cr(4)
.kr(25)
.channels(channels)
.qmax(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINT, c_lt_4) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINTF, c_lt_4) {
for (uint32_t channels = 1; channels < 4; channels++) {
DWConvMicrokernelTester()
.cr(4)
.kr(25)
.channels(channels)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINT, c_gt_4) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINTF, c_gt_4) {
for (uint32_t channels = 5; channels < 8; channels++) {
DWConvMicrokernelTester()
.cr(4)
.kr(25)
.channels(channels)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINT, c_gt_4_with_qmin) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINTF, c_gt_4_with_qmin) {
for (uint32_t channels = 5; channels < 8; channels++) {
DWConvMicrokernelTester()
.cr(4)
.kr(25)
.channels(channels)
.qmin(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINT, c_gt_4_with_qmax) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINTF, c_gt_4_with_qmax) {
for (uint32_t channels = 5; channels < 8; channels++) {
DWConvMicrokernelTester()
.cr(4)
.kr(25)
.channels(channels)
.qmax(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINT, multipixel) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINTF, multipixel) {
for (size_t channels = 1; channels <= 20; channels += 3) {
DWConvMicrokernelTester()
.cr(4)
.kr(25)
.channels(channels)
.width(3)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINT, multipixel_with_step) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINTF, multipixel_with_step) {
for (size_t channels = 1; channels <= 20; channels += 3) {
for (size_t step = 2; step <= 25; step++) {
DWConvMicrokernelTester()
@@ -20504,12 +20504,12 @@
.channels(channels)
.width(3)
.step(step)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINT, multipixel_with_output_stride) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINTF, multipixel_with_output_stride) {
for (size_t channels = 1; channels <= 20; channels += 3) {
DWConvMicrokernelTester()
.cr(4)
@@ -20517,11 +20517,11 @@
.channels(4)
.width(5)
.output_stride(23)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINT, multipixel_with_qmin) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINTF, multipixel_with_qmin) {
for (size_t channels = 1; channels <= 20; channels += 3) {
DWConvMicrokernelTester()
.cr(4)
@@ -20529,11 +20529,11 @@
.channels(channels)
.width(3)
.qmin(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINT, multipixel_with_qmax) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINTF, multipixel_with_qmax) {
for (size_t channels = 1; channels <= 20; channels += 3) {
DWConvMicrokernelTester()
.cr(4)
@@ -20541,22 +20541,22 @@
.channels(channels)
.width(3)
.qmax(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINT, input_offset) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINTF, input_offset) {
for (uint32_t channels = 8; channels < 64; channels += 12) {
DWConvMicrokernelTester()
.cr(4)
.kr(25)
.channels(channels)
.input_offset(112)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINT, zero) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINTF, zero) {
for (uint32_t mz = 0; mz < 25; mz++) {
for (uint32_t channels = 8; channels < 64; channels += 12) {
DWConvMicrokernelTester()
@@ -20565,63 +20565,63 @@
.channels(channels)
.input_offset(112)
.zero_index(mz)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_MAGIC, c_eq_1) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_FMAGIC, c_eq_1) {
DWConvMicrokernelTester()
.cr(1)
.kr(25)
.channels(1)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x25__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x25__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_MAGIC, c_gt_1) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_FMAGIC, c_gt_1) {
for (uint32_t channels = 2; channels < 10; channels++) {
DWConvMicrokernelTester()
.cr(1)
.kr(25)
.channels(channels)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x25__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x25__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_MAGIC, c_gt_1_with_qmin) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_FMAGIC, c_gt_1_with_qmin) {
for (uint32_t channels = 2; channels < 10; channels++) {
DWConvMicrokernelTester()
.cr(1)
.kr(25)
.channels(channels)
.qmin(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x25__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x25__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_MAGIC, c_gt_1_with_qmax) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_FMAGIC, c_gt_1_with_qmax) {
for (uint32_t channels = 2; channels < 10; channels++) {
DWConvMicrokernelTester()
.cr(1)
.kr(25)
.channels(channels)
.qmax(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x25__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x25__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_MAGIC, multipixel) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_FMAGIC, multipixel) {
for (size_t channels = 1; channels <= 5; channels += 1) {
DWConvMicrokernelTester()
.cr(1)
.kr(25)
.channels(channels)
.width(3)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x25__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x25__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_MAGIC, multipixel_with_step) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_FMAGIC, multipixel_with_step) {
for (size_t channels = 1; channels <= 5; channels += 1) {
for (size_t step = 2; step <= 25; step++) {
DWConvMicrokernelTester()
@@ -20630,12 +20630,12 @@
.channels(channels)
.width(3)
.step(step)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x25__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x25__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_MAGIC, multipixel_with_output_stride) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_FMAGIC, multipixel_with_output_stride) {
for (size_t channels = 1; channels <= 5; channels += 1) {
DWConvMicrokernelTester()
.cr(1)
@@ -20643,11 +20643,11 @@
.channels(1)
.width(5)
.output_stride(7)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x25__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x25__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_MAGIC, multipixel_with_qmin) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_FMAGIC, multipixel_with_qmin) {
for (size_t channels = 1; channels <= 5; channels += 1) {
DWConvMicrokernelTester()
.cr(1)
@@ -20655,11 +20655,11 @@
.channels(channels)
.width(3)
.qmin(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x25__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x25__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_MAGIC, multipixel_with_qmax) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_FMAGIC, multipixel_with_qmax) {
for (size_t channels = 1; channels <= 5; channels += 1) {
DWConvMicrokernelTester()
.cr(1)
@@ -20667,22 +20667,22 @@
.channels(channels)
.width(3)
.qmax(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x25__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x25__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_MAGIC, input_offset) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_FMAGIC, input_offset) {
for (uint32_t channels = 2; channels < 16; channels += 3) {
DWConvMicrokernelTester()
.cr(1)
.kr(25)
.channels(channels)
.input_offset(48)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x25__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x25__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_MAGIC, zero) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_FMAGIC, zero) {
for (uint32_t mz = 0; mz < 25; mz++) {
for (uint32_t channels = 2; channels < 16; channels += 3) {
DWConvMicrokernelTester()
@@ -20691,105 +20691,105 @@
.channels(channels)
.input_offset(48)
.zero_index(mz)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x25__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up1x25__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_MAGIC, c_eq_2) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_FMAGIC, c_eq_2) {
DWConvMicrokernelTester()
.cr(2)
.kr(25)
.channels(2)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_MAGIC, c_div_2) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_FMAGIC, c_div_2) {
for (uint32_t channels = 4; channels < 32; channels += 6) {
DWConvMicrokernelTester()
.cr(2)
.kr(25)
.channels(channels)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_MAGIC, c_div_2_with_qmin) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_FMAGIC, c_div_2_with_qmin) {
for (uint32_t channels = 4; channels < 32; channels += 6) {
DWConvMicrokernelTester()
.cr(2)
.kr(25)
.channels(channels)
.qmin(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_MAGIC, c_div_2_with_qmax) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_FMAGIC, c_div_2_with_qmax) {
for (uint32_t channels = 4; channels < 32; channels += 6) {
DWConvMicrokernelTester()
.cr(2)
.kr(25)
.channels(channels)
.qmax(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_MAGIC, c_lt_2) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_FMAGIC, c_lt_2) {
for (uint32_t channels = 1; channels < 2; channels++) {
DWConvMicrokernelTester()
.cr(2)
.kr(25)
.channels(channels)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_MAGIC, c_gt_2) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_FMAGIC, c_gt_2) {
for (uint32_t channels = 3; channels < 4; channels++) {
DWConvMicrokernelTester()
.cr(2)
.kr(25)
.channels(channels)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_MAGIC, c_gt_2_with_qmin) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_FMAGIC, c_gt_2_with_qmin) {
for (uint32_t channels = 3; channels < 4; channels++) {
DWConvMicrokernelTester()
.cr(2)
.kr(25)
.channels(channels)
.qmin(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_MAGIC, c_gt_2_with_qmax) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_FMAGIC, c_gt_2_with_qmax) {
for (uint32_t channels = 3; channels < 4; channels++) {
DWConvMicrokernelTester()
.cr(2)
.kr(25)
.channels(channels)
.qmax(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_MAGIC, multipixel) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_FMAGIC, multipixel) {
for (size_t channels = 1; channels <= 10; channels += 1) {
DWConvMicrokernelTester()
.cr(2)
.kr(25)
.channels(channels)
.width(3)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_MAGIC, multipixel_with_step) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_FMAGIC, multipixel_with_step) {
for (size_t channels = 1; channels <= 10; channels += 1) {
for (size_t step = 2; step <= 25; step++) {
DWConvMicrokernelTester()
@@ -20798,12 +20798,12 @@
.channels(channels)
.width(3)
.step(step)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_MAGIC, multipixel_with_output_stride) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_FMAGIC, multipixel_with_output_stride) {
for (size_t channels = 1; channels <= 10; channels += 1) {
DWConvMicrokernelTester()
.cr(2)
@@ -20811,11 +20811,11 @@
.channels(2)
.width(5)
.output_stride(13)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_MAGIC, multipixel_with_qmin) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_FMAGIC, multipixel_with_qmin) {
for (size_t channels = 1; channels <= 10; channels += 1) {
DWConvMicrokernelTester()
.cr(2)
@@ -20823,11 +20823,11 @@
.channels(channels)
.width(3)
.qmin(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_MAGIC, multipixel_with_qmax) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_FMAGIC, multipixel_with_qmax) {
for (size_t channels = 1; channels <= 10; channels += 1) {
DWConvMicrokernelTester()
.cr(2)
@@ -20835,22 +20835,22 @@
.channels(channels)
.width(3)
.qmax(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_MAGIC, input_offset) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_FMAGIC, input_offset) {
for (uint32_t channels = 4; channels < 32; channels += 6) {
DWConvMicrokernelTester()
.cr(2)
.kr(25)
.channels(channels)
.input_offset(80)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_MAGIC, zero) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_FMAGIC, zero) {
for (uint32_t mz = 0; mz < 25; mz++) {
for (uint32_t channels = 4; channels < 32; channels += 6) {
DWConvMicrokernelTester()
@@ -20859,105 +20859,105 @@
.channels(channels)
.input_offset(80)
.zero_index(mz)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_MAGIC, c_eq_4) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_FMAGIC, c_eq_4) {
DWConvMicrokernelTester()
.cr(4)
.kr(25)
.channels(4)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_MAGIC, c_div_4) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_FMAGIC, c_div_4) {
for (uint32_t channels = 8; channels < 64; channels += 12) {
DWConvMicrokernelTester()
.cr(4)
.kr(25)
.channels(channels)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_MAGIC, c_div_4_with_qmin) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_FMAGIC, c_div_4_with_qmin) {
for (uint32_t channels = 8; channels < 64; channels += 12) {
DWConvMicrokernelTester()
.cr(4)
.kr(25)
.channels(channels)
.qmin(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_MAGIC, c_div_4_with_qmax) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_FMAGIC, c_div_4_with_qmax) {
for (uint32_t channels = 8; channels < 64; channels += 12) {
DWConvMicrokernelTester()
.cr(4)
.kr(25)
.channels(channels)
.qmax(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_MAGIC, c_lt_4) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_FMAGIC, c_lt_4) {
for (uint32_t channels = 1; channels < 4; channels++) {
DWConvMicrokernelTester()
.cr(4)
.kr(25)
.channels(channels)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_MAGIC, c_gt_4) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_FMAGIC, c_gt_4) {
for (uint32_t channels = 5; channels < 8; channels++) {
DWConvMicrokernelTester()
.cr(4)
.kr(25)
.channels(channels)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_MAGIC, c_gt_4_with_qmin) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_FMAGIC, c_gt_4_with_qmin) {
for (uint32_t channels = 5; channels < 8; channels++) {
DWConvMicrokernelTester()
.cr(4)
.kr(25)
.channels(channels)
.qmin(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_MAGIC, c_gt_4_with_qmax) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_FMAGIC, c_gt_4_with_qmax) {
for (uint32_t channels = 5; channels < 8; channels++) {
DWConvMicrokernelTester()
.cr(4)
.kr(25)
.channels(channels)
.qmax(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_MAGIC, multipixel) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_FMAGIC, multipixel) {
for (size_t channels = 1; channels <= 20; channels += 3) {
DWConvMicrokernelTester()
.cr(4)
.kr(25)
.channels(channels)
.width(3)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_MAGIC, multipixel_with_step) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_FMAGIC, multipixel_with_step) {
for (size_t channels = 1; channels <= 20; channels += 3) {
for (size_t step = 2; step <= 25; step++) {
DWConvMicrokernelTester()
@@ -20966,12 +20966,12 @@
.channels(channels)
.width(3)
.step(step)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_MAGIC, multipixel_with_output_stride) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_FMAGIC, multipixel_with_output_stride) {
for (size_t channels = 1; channels <= 20; channels += 3) {
DWConvMicrokernelTester()
.cr(4)
@@ -20979,11 +20979,11 @@
.channels(4)
.width(5)
.output_stride(23)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_MAGIC, multipixel_with_qmin) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_FMAGIC, multipixel_with_qmin) {
for (size_t channels = 1; channels <= 20; channels += 3) {
DWConvMicrokernelTester()
.cr(4)
@@ -20991,11 +20991,11 @@
.channels(channels)
.width(3)
.qmin(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_MAGIC, multipixel_with_qmax) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_FMAGIC, multipixel_with_qmax) {
for (size_t channels = 1; channels <= 20; channels += 3) {
DWConvMicrokernelTester()
.cr(4)
@@ -21003,22 +21003,22 @@
.channels(channels)
.width(3)
.qmax(128)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_MAGIC, input_offset) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_FMAGIC, input_offset) {
for (uint32_t channels = 8; channels < 64; channels += 12) {
DWConvMicrokernelTester()
.cr(4)
.kr(25)
.channels(channels)
.input_offset(112)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_MAGIC, zero) {
+TEST(QS8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_FMAGIC, zero) {
for (uint32_t mz = 0; mz < 25; mz++) {
for (uint32_t channels = 8; channels < 64; channels += 12) {
DWConvMicrokernelTester()
@@ -21027,7 +21027,7 @@
.channels(channels)
.input_offset(112)
.zero_index(mz)
- .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
\ No newline at end of file
diff --git a/test/qs8-dwconv-minmax-fp32.yaml b/test/qs8-dwconv-minmax-fp32.yaml
index cb5a959..fea6679 100644
--- a/test/qs8-dwconv-minmax-fp32.yaml
+++ b/test/qs8-dwconv-minmax-fp32.yaml
@@ -106,18 +106,18 @@
init: xnn_init_qs8_conv_minmax_fp32_wasmsimd_params
- name: xnn_qs8_dwconv_minmax_fp32_ukernel_up24x9__wasmsimd_mul16_add16
init: xnn_init_qs8_conv_minmax_fp32_wasmsimd_params
-- name: xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrint
- init: xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params
-- name: xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint
- init: xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params
-- name: xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint
- init: xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params
-- name: xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_magic
- init: xnn_init_qs8_conv_minmax_fp32_scalar_magic_params
-- name: xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_magic
- init: xnn_init_qs8_conv_minmax_fp32_scalar_magic_params
-- name: xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_magic
- init: xnn_init_qs8_conv_minmax_fp32_scalar_magic_params
+- name: xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrintf
+ init: xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params
+- name: xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrintf
+ init: xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params
+- name: xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrintf
+ init: xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params
+- name: xnn_qs8_dwconv_minmax_fp32_ukernel_up1x9__scalar_fmagic
+ init: xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params
+- name: xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic
+ init: xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params
+- name: xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_fmagic
+ init: xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params
- name: xnn_qs8_dwconv_minmax_fp32_ukernel_up8x25__neon_mul16
init: xnn_init_qs8_conv_minmax_fp32_neon_params
- name: xnn_qs8_dwconv_minmax_fp32_ukernel_up16x25__neon_mul16
@@ -222,15 +222,15 @@
init: xnn_init_qs8_conv_minmax_fp32_wasmsimd_params
- name: xnn_qs8_dwconv_minmax_fp32_ukernel_up24x25__wasmsimd_mul16_add16
init: xnn_init_qs8_conv_minmax_fp32_wasmsimd_params
-- name: xnn_qs8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrint
- init: xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params
-- name: xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrint
- init: xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params
-- name: xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrint
- init: xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params
-- name: xnn_qs8_dwconv_minmax_fp32_ukernel_up1x25__scalar_magic
- init: xnn_init_qs8_conv_minmax_fp32_scalar_magic_params
-- name: xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_magic
- init: xnn_init_qs8_conv_minmax_fp32_scalar_magic_params
-- name: xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_magic
- init: xnn_init_qs8_conv_minmax_fp32_scalar_magic_params
+- name: xnn_qs8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrintf
+ init: xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params
+- name: xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrintf
+ init: xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params
+- name: xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrintf
+ init: xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params
+- name: xnn_qs8_dwconv_minmax_fp32_ukernel_up1x25__scalar_fmagic
+ init: xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params
+- name: xnn_qs8_dwconv_minmax_fp32_ukernel_up2x25__scalar_fmagic
+ init: xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params
+- name: xnn_qs8_dwconv_minmax_fp32_ukernel_up4x25__scalar_fmagic
+ init: xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params
diff --git a/test/qs8-gemm-minmax-fp32.cc b/test/qs8-gemm-minmax-fp32.cc
index 17f3966..34b0430 100644
--- a/test/qs8-gemm-minmax-fp32.cc
+++ b/test/qs8-gemm-minmax-fp32.cc
@@ -94107,7 +94107,7 @@
#endif // XNN_ARCH_WASMSIMD || XNN_ARCH_WASMRELAXEDSIMD
-TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, k_eq_1) {
+TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, k_eq_1) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -94116,10 +94116,10 @@
.m(1)
.n(2)
.k(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, strided_cn) {
+TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, strided_cn) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -94129,10 +94129,10 @@
.n(2)
.k(1)
.cn_stride(5)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, k_eq_1_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, k_eq_1_strided_a) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -94142,10 +94142,10 @@
.n(2)
.k(1)
.a_stride(3)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, k_eq_1_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
@@ -94157,12 +94157,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, k_eq_1_subtile_m) {
+TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 1; m++) {
GemmMicrokernelTester()
.mr(1)
@@ -94173,11 +94173,11 @@
.n(2)
.k(1)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, k_eq_1_subtile_n) {
+TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
.mr(1)
@@ -94188,11 +94188,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, k_gt_1) {
+TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(1)
@@ -94202,11 +94202,11 @@
.m(1)
.n(2)
.k(k)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, k_gt_1_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, k_gt_1_strided_a) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(1)
@@ -94217,11 +94217,11 @@
.n(2)
.k(k)
.a_stride(11)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, k_gt_1_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -94234,13 +94234,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, n_gt_2) {
+TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, n_gt_2) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -94251,12 +94251,12 @@
.m(1)
.n(2)
.k(k)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, n_gt_2_strided_cn) {
+TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, n_gt_2_strided_cn) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -94268,12 +94268,12 @@
.n(2)
.k(k)
.cn_stride(5)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, n_gt_2_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, n_gt_2_strided_a) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -94285,12 +94285,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, n_gt_2_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, n_gt_2_subtile) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
@@ -94303,13 +94303,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, n_div_2) {
+TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, n_div_2) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -94320,12 +94320,12 @@
.m(1)
.n(2)
.k(k)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, n_div_2_strided_cn) {
+TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, n_div_2_strided_cn) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -94337,12 +94337,12 @@
.n(n)
.k(k)
.cn_stride(5)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, n_div_2_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, n_div_2_strided_a) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -94354,12 +94354,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, n_div_2_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, n_div_2_subtile) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
@@ -94372,13 +94372,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, strided_cm_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -94392,13 +94392,13 @@
.k(k)
.cm_stride(5)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, qmin) {
+TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, qmin) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -94408,10 +94408,10 @@
.n(2)
.k(1)
.qmin(128)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, qmax) {
+TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, qmax) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -94421,10 +94421,10 @@
.n(2)
.k(1)
.qmax(128)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, strided_cm) {
+TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, strided_cm) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -94434,11 +94434,11 @@
.n(2)
.k(1)
.cm_stride(5)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, k_eq_1) {
+TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, k_eq_1) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -94447,10 +94447,10 @@
.m(2)
.n(2)
.k(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, strided_cn) {
+TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, strided_cn) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -94460,10 +94460,10 @@
.n(2)
.k(1)
.cn_stride(5)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, k_eq_1_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, k_eq_1_strided_a) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -94473,10 +94473,10 @@
.n(2)
.k(1)
.a_stride(3)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, k_eq_1_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
@@ -94488,12 +94488,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, k_eq_1_subtile_m) {
+TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 2; m++) {
GemmMicrokernelTester()
.mr(2)
@@ -94504,11 +94504,11 @@
.n(2)
.k(1)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, k_eq_1_subtile_n) {
+TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
.mr(2)
@@ -94519,11 +94519,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, k_gt_1) {
+TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(2)
@@ -94533,11 +94533,11 @@
.m(2)
.n(2)
.k(k)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, k_gt_1_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, k_gt_1_strided_a) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(2)
@@ -94548,11 +94548,11 @@
.n(2)
.k(k)
.a_stride(11)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, k_gt_1_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -94565,13 +94565,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, n_gt_2) {
+TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, n_gt_2) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -94582,12 +94582,12 @@
.m(2)
.n(2)
.k(k)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, n_gt_2_strided_cn) {
+TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, n_gt_2_strided_cn) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -94599,12 +94599,12 @@
.n(2)
.k(k)
.cn_stride(5)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, n_gt_2_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, n_gt_2_strided_a) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -94616,12 +94616,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, n_gt_2_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, n_gt_2_subtile) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
@@ -94634,13 +94634,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, n_div_2) {
+TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, n_div_2) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -94651,12 +94651,12 @@
.m(2)
.n(2)
.k(k)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, n_div_2_strided_cn) {
+TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, n_div_2_strided_cn) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -94668,12 +94668,12 @@
.n(n)
.k(k)
.cn_stride(5)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, n_div_2_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, n_div_2_strided_a) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -94685,12 +94685,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, n_div_2_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, n_div_2_subtile) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
@@ -94703,13 +94703,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, strided_cm_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -94723,13 +94723,13 @@
.k(k)
.cm_stride(5)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, qmin) {
+TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, qmin) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -94739,10 +94739,10 @@
.n(2)
.k(1)
.qmin(128)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, qmax) {
+TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, qmax) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -94752,10 +94752,10 @@
.n(2)
.k(1)
.qmax(128)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, strided_cm) {
+TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, strided_cm) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -94765,11 +94765,11 @@
.n(2)
.k(1)
.cm_stride(5)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, k_eq_1) {
+TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, k_eq_1) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -94778,10 +94778,10 @@
.m(3)
.n(2)
.k(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, strided_cn) {
+TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, strided_cn) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -94791,10 +94791,10 @@
.n(2)
.k(1)
.cn_stride(5)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, k_eq_1_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, k_eq_1_strided_a) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -94804,10 +94804,10 @@
.n(2)
.k(1)
.a_stride(3)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, k_eq_1_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
@@ -94819,12 +94819,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, k_eq_1_subtile_m) {
+TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 3; m++) {
GemmMicrokernelTester()
.mr(3)
@@ -94835,11 +94835,11 @@
.n(2)
.k(1)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, k_eq_1_subtile_n) {
+TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
.mr(3)
@@ -94850,11 +94850,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, k_gt_1) {
+TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(3)
@@ -94864,11 +94864,11 @@
.m(3)
.n(2)
.k(k)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, k_gt_1_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, k_gt_1_strided_a) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(3)
@@ -94879,11 +94879,11 @@
.n(2)
.k(k)
.a_stride(11)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, k_gt_1_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -94896,13 +94896,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, n_gt_2) {
+TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, n_gt_2) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -94913,12 +94913,12 @@
.m(3)
.n(2)
.k(k)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, n_gt_2_strided_cn) {
+TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, n_gt_2_strided_cn) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -94930,12 +94930,12 @@
.n(2)
.k(k)
.cn_stride(5)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, n_gt_2_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, n_gt_2_strided_a) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -94947,12 +94947,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, n_gt_2_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, n_gt_2_subtile) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
@@ -94965,13 +94965,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, n_div_2) {
+TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, n_div_2) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -94982,12 +94982,12 @@
.m(3)
.n(2)
.k(k)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, n_div_2_strided_cn) {
+TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, n_div_2_strided_cn) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -94999,12 +94999,12 @@
.n(n)
.k(k)
.cn_stride(5)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, n_div_2_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, n_div_2_strided_a) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -95016,12 +95016,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, n_div_2_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, n_div_2_subtile) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
@@ -95034,13 +95034,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, strided_cm_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -95054,13 +95054,13 @@
.k(k)
.cm_stride(5)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, qmin) {
+TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, qmin) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -95070,10 +95070,10 @@
.n(2)
.k(1)
.qmin(128)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, qmax) {
+TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, qmax) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -95083,10 +95083,10 @@
.n(2)
.k(1)
.qmax(128)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, strided_cm) {
+TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, strided_cm) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -95096,11 +95096,11 @@
.n(2)
.k(1)
.cm_stride(5)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, k_eq_1) {
+TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, k_eq_1) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -95109,10 +95109,10 @@
.m(4)
.n(2)
.k(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, strided_cn) {
+TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, strided_cn) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -95122,10 +95122,10 @@
.n(2)
.k(1)
.cn_stride(5)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, k_eq_1_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, k_eq_1_strided_a) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -95135,10 +95135,10 @@
.n(2)
.k(1)
.a_stride(3)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, k_eq_1_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
@@ -95150,12 +95150,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, k_eq_1_subtile_m) {
+TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 4; m++) {
GemmMicrokernelTester()
.mr(4)
@@ -95166,11 +95166,11 @@
.n(2)
.k(1)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, k_eq_1_subtile_n) {
+TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
.mr(4)
@@ -95181,11 +95181,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, k_gt_1) {
+TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(4)
@@ -95195,11 +95195,11 @@
.m(4)
.n(2)
.k(k)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, k_gt_1_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, k_gt_1_strided_a) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(4)
@@ -95210,11 +95210,11 @@
.n(2)
.k(k)
.a_stride(11)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, k_gt_1_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -95227,13 +95227,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, n_gt_2) {
+TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, n_gt_2) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -95244,12 +95244,12 @@
.m(4)
.n(2)
.k(k)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, n_gt_2_strided_cn) {
+TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, n_gt_2_strided_cn) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -95261,12 +95261,12 @@
.n(2)
.k(k)
.cn_stride(5)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, n_gt_2_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, n_gt_2_strided_a) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -95278,12 +95278,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, n_gt_2_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, n_gt_2_subtile) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
@@ -95296,13 +95296,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, n_div_2) {
+TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, n_div_2) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -95313,12 +95313,12 @@
.m(4)
.n(2)
.k(k)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, n_div_2_strided_cn) {
+TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, n_div_2_strided_cn) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -95330,12 +95330,12 @@
.n(n)
.k(k)
.cn_stride(5)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, n_div_2_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, n_div_2_strided_a) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -95347,12 +95347,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, n_div_2_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, n_div_2_subtile) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
@@ -95365,13 +95365,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, strided_cm_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -95385,13 +95385,13 @@
.k(k)
.cm_stride(5)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, qmin) {
+TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, qmin) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -95401,10 +95401,10 @@
.n(2)
.k(1)
.qmin(128)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, qmax) {
+TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, qmax) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -95414,10 +95414,10 @@
.n(2)
.k(1)
.qmax(128)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, strided_cm) {
+TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, strided_cm) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -95427,11 +95427,11 @@
.n(2)
.k(1)
.cm_stride(5)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, k_eq_1) {
+TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, k_eq_1) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -95440,10 +95440,10 @@
.m(1)
.n(4)
.k(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, strided_cn) {
+TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, strided_cn) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -95453,10 +95453,10 @@
.n(4)
.k(1)
.cn_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, k_eq_1_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, k_eq_1_strided_a) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -95466,10 +95466,10 @@
.n(4)
.k(1)
.a_stride(3)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, k_eq_1_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
@@ -95481,12 +95481,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, k_eq_1_subtile_m) {
+TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 1; m++) {
GemmMicrokernelTester()
.mr(1)
@@ -95497,11 +95497,11 @@
.n(4)
.k(1)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, k_eq_1_subtile_n) {
+TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
.mr(1)
@@ -95512,11 +95512,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, k_gt_1) {
+TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(1)
@@ -95526,11 +95526,11 @@
.m(1)
.n(4)
.k(k)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, k_gt_1_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, k_gt_1_strided_a) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(1)
@@ -95541,11 +95541,11 @@
.n(4)
.k(k)
.a_stride(11)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, k_gt_1_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -95558,13 +95558,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, n_gt_4) {
+TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, n_gt_4) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -95575,12 +95575,12 @@
.m(1)
.n(4)
.k(k)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, n_gt_4_strided_cn) {
+TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, n_gt_4_strided_cn) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -95592,12 +95592,12 @@
.n(4)
.k(k)
.cn_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, n_gt_4_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, n_gt_4_strided_a) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -95609,12 +95609,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, n_gt_4_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, n_gt_4_subtile) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
@@ -95627,13 +95627,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, n_div_4) {
+TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, n_div_4) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -95644,12 +95644,12 @@
.m(1)
.n(4)
.k(k)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, n_div_4_strided_cn) {
+TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, n_div_4_strided_cn) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -95661,12 +95661,12 @@
.n(n)
.k(k)
.cn_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, n_div_4_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, n_div_4_strided_a) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -95678,12 +95678,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, n_div_4_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, n_div_4_subtile) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
@@ -95696,13 +95696,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, strided_cm_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -95716,13 +95716,13 @@
.k(k)
.cm_stride(7)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, qmin) {
+TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, qmin) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -95732,10 +95732,10 @@
.n(4)
.k(1)
.qmin(128)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, qmax) {
+TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, qmax) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -95745,10 +95745,10 @@
.n(4)
.k(1)
.qmax(128)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, strided_cm) {
+TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, strided_cm) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -95758,11 +95758,11 @@
.n(4)
.k(1)
.cm_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, k_eq_1) {
+TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, k_eq_1) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -95771,10 +95771,10 @@
.m(2)
.n(4)
.k(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, strided_cn) {
+TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, strided_cn) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -95784,10 +95784,10 @@
.n(4)
.k(1)
.cn_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, k_eq_1_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, k_eq_1_strided_a) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -95797,10 +95797,10 @@
.n(4)
.k(1)
.a_stride(3)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, k_eq_1_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
@@ -95812,12 +95812,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, k_eq_1_subtile_m) {
+TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 2; m++) {
GemmMicrokernelTester()
.mr(2)
@@ -95828,11 +95828,11 @@
.n(4)
.k(1)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, k_eq_1_subtile_n) {
+TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
.mr(2)
@@ -95843,11 +95843,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, k_gt_1) {
+TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(2)
@@ -95857,11 +95857,11 @@
.m(2)
.n(4)
.k(k)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, k_gt_1_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, k_gt_1_strided_a) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(2)
@@ -95872,11 +95872,11 @@
.n(4)
.k(k)
.a_stride(11)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, k_gt_1_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -95889,13 +95889,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, n_gt_4) {
+TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, n_gt_4) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -95906,12 +95906,12 @@
.m(2)
.n(4)
.k(k)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, n_gt_4_strided_cn) {
+TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, n_gt_4_strided_cn) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -95923,12 +95923,12 @@
.n(4)
.k(k)
.cn_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, n_gt_4_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, n_gt_4_strided_a) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -95940,12 +95940,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, n_gt_4_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, n_gt_4_subtile) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
@@ -95958,13 +95958,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, n_div_4) {
+TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, n_div_4) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -95975,12 +95975,12 @@
.m(2)
.n(4)
.k(k)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, n_div_4_strided_cn) {
+TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, n_div_4_strided_cn) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -95992,12 +95992,12 @@
.n(n)
.k(k)
.cn_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, n_div_4_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, n_div_4_strided_a) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -96009,12 +96009,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, n_div_4_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, n_div_4_subtile) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
@@ -96027,13 +96027,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, strided_cm_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -96047,13 +96047,13 @@
.k(k)
.cm_stride(7)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, qmin) {
+TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, qmin) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -96063,10 +96063,10 @@
.n(4)
.k(1)
.qmin(128)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, qmax) {
+TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, qmax) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -96076,10 +96076,10 @@
.n(4)
.k(1)
.qmax(128)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, strided_cm) {
+TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, strided_cm) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -96089,11 +96089,11 @@
.n(4)
.k(1)
.cm_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, k_eq_1) {
+TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, k_eq_1) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -96102,10 +96102,10 @@
.m(3)
.n(4)
.k(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, strided_cn) {
+TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, strided_cn) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -96115,10 +96115,10 @@
.n(4)
.k(1)
.cn_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, k_eq_1_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, k_eq_1_strided_a) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -96128,10 +96128,10 @@
.n(4)
.k(1)
.a_stride(3)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, k_eq_1_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
@@ -96143,12 +96143,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, k_eq_1_subtile_m) {
+TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 3; m++) {
GemmMicrokernelTester()
.mr(3)
@@ -96159,11 +96159,11 @@
.n(4)
.k(1)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, k_eq_1_subtile_n) {
+TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
.mr(3)
@@ -96174,11 +96174,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, k_gt_1) {
+TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(3)
@@ -96188,11 +96188,11 @@
.m(3)
.n(4)
.k(k)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, k_gt_1_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, k_gt_1_strided_a) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(3)
@@ -96203,11 +96203,11 @@
.n(4)
.k(k)
.a_stride(11)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, k_gt_1_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -96220,13 +96220,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, n_gt_4) {
+TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, n_gt_4) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -96237,12 +96237,12 @@
.m(3)
.n(4)
.k(k)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, n_gt_4_strided_cn) {
+TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, n_gt_4_strided_cn) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -96254,12 +96254,12 @@
.n(4)
.k(k)
.cn_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, n_gt_4_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, n_gt_4_strided_a) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -96271,12 +96271,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, n_gt_4_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, n_gt_4_subtile) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
@@ -96289,13 +96289,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, n_div_4) {
+TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, n_div_4) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -96306,12 +96306,12 @@
.m(3)
.n(4)
.k(k)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, n_div_4_strided_cn) {
+TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, n_div_4_strided_cn) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -96323,12 +96323,12 @@
.n(n)
.k(k)
.cn_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, n_div_4_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, n_div_4_strided_a) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -96340,12 +96340,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, n_div_4_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, n_div_4_subtile) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
@@ -96358,13 +96358,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, strided_cm_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -96378,13 +96378,13 @@
.k(k)
.cm_stride(7)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, qmin) {
+TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, qmin) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -96394,10 +96394,10 @@
.n(4)
.k(1)
.qmin(128)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, qmax) {
+TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, qmax) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -96407,10 +96407,10 @@
.n(4)
.k(1)
.qmax(128)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, strided_cm) {
+TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, strided_cm) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -96420,11 +96420,11 @@
.n(4)
.k(1)
.cm_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, k_eq_1) {
+TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, k_eq_1) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -96433,10 +96433,10 @@
.m(4)
.n(4)
.k(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, strided_cn) {
+TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, strided_cn) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -96446,10 +96446,10 @@
.n(4)
.k(1)
.cn_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, k_eq_1_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, k_eq_1_strided_a) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -96459,10 +96459,10 @@
.n(4)
.k(1)
.a_stride(3)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, k_eq_1_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
@@ -96474,12 +96474,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, k_eq_1_subtile_m) {
+TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 4; m++) {
GemmMicrokernelTester()
.mr(4)
@@ -96490,11 +96490,11 @@
.n(4)
.k(1)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, k_eq_1_subtile_n) {
+TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
.mr(4)
@@ -96505,11 +96505,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, k_gt_1) {
+TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(4)
@@ -96519,11 +96519,11 @@
.m(4)
.n(4)
.k(k)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, k_gt_1_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, k_gt_1_strided_a) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(4)
@@ -96534,11 +96534,11 @@
.n(4)
.k(k)
.a_stride(11)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, k_gt_1_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -96551,13 +96551,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, n_gt_4) {
+TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, n_gt_4) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -96568,12 +96568,12 @@
.m(4)
.n(4)
.k(k)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, n_gt_4_strided_cn) {
+TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, n_gt_4_strided_cn) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -96585,12 +96585,12 @@
.n(4)
.k(k)
.cn_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, n_gt_4_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, n_gt_4_strided_a) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -96602,12 +96602,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, n_gt_4_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, n_gt_4_subtile) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
@@ -96620,13 +96620,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, n_div_4) {
+TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, n_div_4) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -96637,12 +96637,12 @@
.m(4)
.n(4)
.k(k)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, n_div_4_strided_cn) {
+TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, n_div_4_strided_cn) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -96654,12 +96654,12 @@
.n(n)
.k(k)
.cn_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, n_div_4_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, n_div_4_strided_a) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -96671,12 +96671,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, n_div_4_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, n_div_4_subtile) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
@@ -96689,13 +96689,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, strided_cm_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -96709,13 +96709,13 @@
.k(k)
.cm_stride(7)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, qmin) {
+TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, qmin) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -96725,10 +96725,10 @@
.n(4)
.k(1)
.qmin(128)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, qmax) {
+TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, qmax) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -96738,10 +96738,10 @@
.n(4)
.k(1)
.qmax(128)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, strided_cm) {
+TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, strided_cm) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -96751,11 +96751,11 @@
.n(4)
.k(1)
.cm_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, k_eq_1) {
+TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, k_eq_1) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -96764,10 +96764,10 @@
.m(1)
.n(2)
.k(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, strided_cn) {
+TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, strided_cn) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -96777,10 +96777,10 @@
.n(2)
.k(1)
.cn_stride(5)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, k_eq_1_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, k_eq_1_strided_a) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -96790,10 +96790,10 @@
.n(2)
.k(1)
.a_stride(3)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, k_eq_1_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
@@ -96805,12 +96805,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, k_eq_1_subtile_m) {
+TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 1; m++) {
GemmMicrokernelTester()
.mr(1)
@@ -96821,11 +96821,11 @@
.n(2)
.k(1)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, k_eq_1_subtile_n) {
+TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
.mr(1)
@@ -96836,11 +96836,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, k_gt_1) {
+TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(1)
@@ -96850,11 +96850,11 @@
.m(1)
.n(2)
.k(k)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, k_gt_1_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, k_gt_1_strided_a) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(1)
@@ -96865,11 +96865,11 @@
.n(2)
.k(k)
.a_stride(11)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, k_gt_1_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -96882,13 +96882,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, n_gt_2) {
+TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, n_gt_2) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -96899,12 +96899,12 @@
.m(1)
.n(2)
.k(k)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, n_gt_2_strided_cn) {
+TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, n_gt_2_strided_cn) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -96916,12 +96916,12 @@
.n(2)
.k(k)
.cn_stride(5)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, n_gt_2_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, n_gt_2_strided_a) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -96933,12 +96933,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, n_gt_2_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, n_gt_2_subtile) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
@@ -96951,13 +96951,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, n_div_2) {
+TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, n_div_2) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -96968,12 +96968,12 @@
.m(1)
.n(2)
.k(k)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, n_div_2_strided_cn) {
+TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, n_div_2_strided_cn) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -96985,12 +96985,12 @@
.n(n)
.k(k)
.cn_stride(5)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, n_div_2_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, n_div_2_strided_a) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -97002,12 +97002,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, n_div_2_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, n_div_2_subtile) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
@@ -97020,13 +97020,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, strided_cm_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -97040,13 +97040,13 @@
.k(k)
.cm_stride(5)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, qmin) {
+TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, qmin) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -97056,10 +97056,10 @@
.n(2)
.k(1)
.qmin(128)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, qmax) {
+TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, qmax) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -97069,10 +97069,10 @@
.n(2)
.k(1)
.qmax(128)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, strided_cm) {
+TEST(QS8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, strided_cm) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -97082,11 +97082,11 @@
.n(2)
.k(1)
.cm_stride(5)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, k_eq_1) {
+TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, k_eq_1) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -97095,10 +97095,10 @@
.m(2)
.n(2)
.k(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, strided_cn) {
+TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, strided_cn) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -97108,10 +97108,10 @@
.n(2)
.k(1)
.cn_stride(5)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, k_eq_1_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, k_eq_1_strided_a) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -97121,10 +97121,10 @@
.n(2)
.k(1)
.a_stride(3)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, k_eq_1_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
@@ -97136,12 +97136,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, k_eq_1_subtile_m) {
+TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 2; m++) {
GemmMicrokernelTester()
.mr(2)
@@ -97152,11 +97152,11 @@
.n(2)
.k(1)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, k_eq_1_subtile_n) {
+TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
.mr(2)
@@ -97167,11 +97167,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, k_gt_1) {
+TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(2)
@@ -97181,11 +97181,11 @@
.m(2)
.n(2)
.k(k)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, k_gt_1_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, k_gt_1_strided_a) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(2)
@@ -97196,11 +97196,11 @@
.n(2)
.k(k)
.a_stride(11)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, k_gt_1_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -97213,13 +97213,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, n_gt_2) {
+TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, n_gt_2) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -97230,12 +97230,12 @@
.m(2)
.n(2)
.k(k)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, n_gt_2_strided_cn) {
+TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, n_gt_2_strided_cn) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -97247,12 +97247,12 @@
.n(2)
.k(k)
.cn_stride(5)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, n_gt_2_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, n_gt_2_strided_a) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -97264,12 +97264,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, n_gt_2_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, n_gt_2_subtile) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
@@ -97282,13 +97282,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, n_div_2) {
+TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, n_div_2) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -97299,12 +97299,12 @@
.m(2)
.n(2)
.k(k)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, n_div_2_strided_cn) {
+TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, n_div_2_strided_cn) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -97316,12 +97316,12 @@
.n(n)
.k(k)
.cn_stride(5)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, n_div_2_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, n_div_2_strided_a) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -97333,12 +97333,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, n_div_2_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, n_div_2_subtile) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
@@ -97351,13 +97351,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, strided_cm_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -97371,13 +97371,13 @@
.k(k)
.cm_stride(5)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, qmin) {
+TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, qmin) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -97387,10 +97387,10 @@
.n(2)
.k(1)
.qmin(128)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, qmax) {
+TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, qmax) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -97400,10 +97400,10 @@
.n(2)
.k(1)
.qmax(128)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, strided_cm) {
+TEST(QS8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, strided_cm) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -97413,11 +97413,11 @@
.n(2)
.k(1)
.cm_stride(5)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, k_eq_1) {
+TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, k_eq_1) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -97426,10 +97426,10 @@
.m(3)
.n(2)
.k(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, strided_cn) {
+TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, strided_cn) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -97439,10 +97439,10 @@
.n(2)
.k(1)
.cn_stride(5)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, k_eq_1_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, k_eq_1_strided_a) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -97452,10 +97452,10 @@
.n(2)
.k(1)
.a_stride(3)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, k_eq_1_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
@@ -97467,12 +97467,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, k_eq_1_subtile_m) {
+TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 3; m++) {
GemmMicrokernelTester()
.mr(3)
@@ -97483,11 +97483,11 @@
.n(2)
.k(1)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, k_eq_1_subtile_n) {
+TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
.mr(3)
@@ -97498,11 +97498,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, k_gt_1) {
+TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(3)
@@ -97512,11 +97512,11 @@
.m(3)
.n(2)
.k(k)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, k_gt_1_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, k_gt_1_strided_a) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(3)
@@ -97527,11 +97527,11 @@
.n(2)
.k(k)
.a_stride(11)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, k_gt_1_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -97544,13 +97544,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, n_gt_2) {
+TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, n_gt_2) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -97561,12 +97561,12 @@
.m(3)
.n(2)
.k(k)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, n_gt_2_strided_cn) {
+TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, n_gt_2_strided_cn) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -97578,12 +97578,12 @@
.n(2)
.k(k)
.cn_stride(5)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, n_gt_2_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, n_gt_2_strided_a) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -97595,12 +97595,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, n_gt_2_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, n_gt_2_subtile) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
@@ -97613,13 +97613,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, n_div_2) {
+TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, n_div_2) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -97630,12 +97630,12 @@
.m(3)
.n(2)
.k(k)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, n_div_2_strided_cn) {
+TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, n_div_2_strided_cn) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -97647,12 +97647,12 @@
.n(n)
.k(k)
.cn_stride(5)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, n_div_2_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, n_div_2_strided_a) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -97664,12 +97664,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, n_div_2_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, n_div_2_subtile) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
@@ -97682,13 +97682,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, strided_cm_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -97702,13 +97702,13 @@
.k(k)
.cm_stride(5)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, qmin) {
+TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, qmin) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -97718,10 +97718,10 @@
.n(2)
.k(1)
.qmin(128)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, qmax) {
+TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, qmax) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -97731,10 +97731,10 @@
.n(2)
.k(1)
.qmax(128)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, strided_cm) {
+TEST(QS8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, strided_cm) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -97744,11 +97744,11 @@
.n(2)
.k(1)
.cm_stride(5)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, k_eq_1) {
+TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, k_eq_1) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -97757,10 +97757,10 @@
.m(4)
.n(2)
.k(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, strided_cn) {
+TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, strided_cn) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -97770,10 +97770,10 @@
.n(2)
.k(1)
.cn_stride(5)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, k_eq_1_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, k_eq_1_strided_a) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -97783,10 +97783,10 @@
.n(2)
.k(1)
.a_stride(3)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, k_eq_1_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
@@ -97798,12 +97798,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, k_eq_1_subtile_m) {
+TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 4; m++) {
GemmMicrokernelTester()
.mr(4)
@@ -97814,11 +97814,11 @@
.n(2)
.k(1)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, k_eq_1_subtile_n) {
+TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
.mr(4)
@@ -97829,11 +97829,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, k_gt_1) {
+TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(4)
@@ -97843,11 +97843,11 @@
.m(4)
.n(2)
.k(k)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, k_gt_1_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, k_gt_1_strided_a) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(4)
@@ -97858,11 +97858,11 @@
.n(2)
.k(k)
.a_stride(11)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, k_gt_1_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -97875,13 +97875,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, n_gt_2) {
+TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, n_gt_2) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -97892,12 +97892,12 @@
.m(4)
.n(2)
.k(k)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, n_gt_2_strided_cn) {
+TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, n_gt_2_strided_cn) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -97909,12 +97909,12 @@
.n(2)
.k(k)
.cn_stride(5)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, n_gt_2_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, n_gt_2_strided_a) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -97926,12 +97926,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, n_gt_2_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, n_gt_2_subtile) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
@@ -97944,13 +97944,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, n_div_2) {
+TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, n_div_2) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -97961,12 +97961,12 @@
.m(4)
.n(2)
.k(k)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, n_div_2_strided_cn) {
+TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, n_div_2_strided_cn) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -97978,12 +97978,12 @@
.n(n)
.k(k)
.cn_stride(5)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, n_div_2_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, n_div_2_strided_a) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -97995,12 +97995,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, n_div_2_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, n_div_2_subtile) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
@@ -98013,13 +98013,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, strided_cm_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -98033,13 +98033,13 @@
.k(k)
.cm_stride(5)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, qmin) {
+TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, qmin) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -98049,10 +98049,10 @@
.n(2)
.k(1)
.qmin(128)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, qmax) {
+TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, qmax) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -98062,10 +98062,10 @@
.n(2)
.k(1)
.qmax(128)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, strided_cm) {
+TEST(QS8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, strided_cm) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -98075,11 +98075,11 @@
.n(2)
.k(1)
.cm_stride(5)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, k_eq_1) {
+TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, k_eq_1) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -98088,10 +98088,10 @@
.m(1)
.n(4)
.k(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, strided_cn) {
+TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, strided_cn) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -98101,10 +98101,10 @@
.n(4)
.k(1)
.cn_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, k_eq_1_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, k_eq_1_strided_a) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -98114,10 +98114,10 @@
.n(4)
.k(1)
.a_stride(3)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, k_eq_1_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
@@ -98129,12 +98129,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, k_eq_1_subtile_m) {
+TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 1; m++) {
GemmMicrokernelTester()
.mr(1)
@@ -98145,11 +98145,11 @@
.n(4)
.k(1)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, k_eq_1_subtile_n) {
+TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
.mr(1)
@@ -98160,11 +98160,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, k_gt_1) {
+TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(1)
@@ -98174,11 +98174,11 @@
.m(1)
.n(4)
.k(k)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, k_gt_1_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, k_gt_1_strided_a) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(1)
@@ -98189,11 +98189,11 @@
.n(4)
.k(k)
.a_stride(11)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, k_gt_1_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -98206,13 +98206,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, n_gt_4) {
+TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, n_gt_4) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -98223,12 +98223,12 @@
.m(1)
.n(4)
.k(k)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, n_gt_4_strided_cn) {
+TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, n_gt_4_strided_cn) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -98240,12 +98240,12 @@
.n(4)
.k(k)
.cn_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, n_gt_4_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, n_gt_4_strided_a) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -98257,12 +98257,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, n_gt_4_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, n_gt_4_subtile) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
@@ -98275,13 +98275,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, n_div_4) {
+TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, n_div_4) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -98292,12 +98292,12 @@
.m(1)
.n(4)
.k(k)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, n_div_4_strided_cn) {
+TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, n_div_4_strided_cn) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -98309,12 +98309,12 @@
.n(n)
.k(k)
.cn_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, n_div_4_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, n_div_4_strided_a) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -98326,12 +98326,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, n_div_4_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, n_div_4_subtile) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
@@ -98344,13 +98344,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, strided_cm_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -98364,13 +98364,13 @@
.k(k)
.cm_stride(7)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, qmin) {
+TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, qmin) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -98380,10 +98380,10 @@
.n(4)
.k(1)
.qmin(128)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, qmax) {
+TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, qmax) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -98393,10 +98393,10 @@
.n(4)
.k(1)
.qmax(128)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, strided_cm) {
+TEST(QS8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, strided_cm) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -98406,11 +98406,11 @@
.n(4)
.k(1)
.cm_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, k_eq_1) {
+TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, k_eq_1) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -98419,10 +98419,10 @@
.m(2)
.n(4)
.k(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, strided_cn) {
+TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, strided_cn) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -98432,10 +98432,10 @@
.n(4)
.k(1)
.cn_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, k_eq_1_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, k_eq_1_strided_a) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -98445,10 +98445,10 @@
.n(4)
.k(1)
.a_stride(3)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, k_eq_1_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
@@ -98460,12 +98460,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, k_eq_1_subtile_m) {
+TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 2; m++) {
GemmMicrokernelTester()
.mr(2)
@@ -98476,11 +98476,11 @@
.n(4)
.k(1)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, k_eq_1_subtile_n) {
+TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
.mr(2)
@@ -98491,11 +98491,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, k_gt_1) {
+TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(2)
@@ -98505,11 +98505,11 @@
.m(2)
.n(4)
.k(k)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, k_gt_1_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, k_gt_1_strided_a) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(2)
@@ -98520,11 +98520,11 @@
.n(4)
.k(k)
.a_stride(11)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, k_gt_1_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -98537,13 +98537,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, n_gt_4) {
+TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, n_gt_4) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -98554,12 +98554,12 @@
.m(2)
.n(4)
.k(k)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, n_gt_4_strided_cn) {
+TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, n_gt_4_strided_cn) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -98571,12 +98571,12 @@
.n(4)
.k(k)
.cn_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, n_gt_4_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, n_gt_4_strided_a) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -98588,12 +98588,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, n_gt_4_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, n_gt_4_subtile) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
@@ -98606,13 +98606,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, n_div_4) {
+TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, n_div_4) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -98623,12 +98623,12 @@
.m(2)
.n(4)
.k(k)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, n_div_4_strided_cn) {
+TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, n_div_4_strided_cn) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -98640,12 +98640,12 @@
.n(n)
.k(k)
.cn_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, n_div_4_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, n_div_4_strided_a) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -98657,12 +98657,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, n_div_4_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, n_div_4_subtile) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
@@ -98675,13 +98675,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, strided_cm_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -98695,13 +98695,13 @@
.k(k)
.cm_stride(7)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, qmin) {
+TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, qmin) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -98711,10 +98711,10 @@
.n(4)
.k(1)
.qmin(128)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, qmax) {
+TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, qmax) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -98724,10 +98724,10 @@
.n(4)
.k(1)
.qmax(128)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, strided_cm) {
+TEST(QS8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, strided_cm) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -98737,11 +98737,11 @@
.n(4)
.k(1)
.cm_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, k_eq_1) {
+TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, k_eq_1) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -98750,10 +98750,10 @@
.m(3)
.n(4)
.k(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, strided_cn) {
+TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, strided_cn) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -98763,10 +98763,10 @@
.n(4)
.k(1)
.cn_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, k_eq_1_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, k_eq_1_strided_a) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -98776,10 +98776,10 @@
.n(4)
.k(1)
.a_stride(3)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, k_eq_1_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
@@ -98791,12 +98791,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, k_eq_1_subtile_m) {
+TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 3; m++) {
GemmMicrokernelTester()
.mr(3)
@@ -98807,11 +98807,11 @@
.n(4)
.k(1)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, k_eq_1_subtile_n) {
+TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
.mr(3)
@@ -98822,11 +98822,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, k_gt_1) {
+TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(3)
@@ -98836,11 +98836,11 @@
.m(3)
.n(4)
.k(k)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, k_gt_1_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, k_gt_1_strided_a) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(3)
@@ -98851,11 +98851,11 @@
.n(4)
.k(k)
.a_stride(11)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, k_gt_1_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -98868,13 +98868,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, n_gt_4) {
+TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, n_gt_4) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -98885,12 +98885,12 @@
.m(3)
.n(4)
.k(k)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, n_gt_4_strided_cn) {
+TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, n_gt_4_strided_cn) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -98902,12 +98902,12 @@
.n(4)
.k(k)
.cn_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, n_gt_4_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, n_gt_4_strided_a) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -98919,12 +98919,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, n_gt_4_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, n_gt_4_subtile) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
@@ -98937,13 +98937,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, n_div_4) {
+TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, n_div_4) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -98954,12 +98954,12 @@
.m(3)
.n(4)
.k(k)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, n_div_4_strided_cn) {
+TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, n_div_4_strided_cn) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -98971,12 +98971,12 @@
.n(n)
.k(k)
.cn_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, n_div_4_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, n_div_4_strided_a) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -98988,12 +98988,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, n_div_4_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, n_div_4_subtile) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
@@ -99006,13 +99006,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, strided_cm_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -99026,13 +99026,13 @@
.k(k)
.cm_stride(7)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, qmin) {
+TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, qmin) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -99042,10 +99042,10 @@
.n(4)
.k(1)
.qmin(128)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, qmax) {
+TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, qmax) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -99055,10 +99055,10 @@
.n(4)
.k(1)
.qmax(128)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, strided_cm) {
+TEST(QS8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, strided_cm) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -99068,11 +99068,11 @@
.n(4)
.k(1)
.cm_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, k_eq_1) {
+TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, k_eq_1) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -99081,10 +99081,10 @@
.m(4)
.n(4)
.k(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, strided_cn) {
+TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, strided_cn) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -99094,10 +99094,10 @@
.n(4)
.k(1)
.cn_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, k_eq_1_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, k_eq_1_strided_a) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -99107,10 +99107,10 @@
.n(4)
.k(1)
.a_stride(3)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, k_eq_1_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
@@ -99122,12 +99122,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, k_eq_1_subtile_m) {
+TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 4; m++) {
GemmMicrokernelTester()
.mr(4)
@@ -99138,11 +99138,11 @@
.n(4)
.k(1)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, k_eq_1_subtile_n) {
+TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
.mr(4)
@@ -99153,11 +99153,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, k_gt_1) {
+TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(4)
@@ -99167,11 +99167,11 @@
.m(4)
.n(4)
.k(k)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, k_gt_1_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, k_gt_1_strided_a) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(4)
@@ -99182,11 +99182,11 @@
.n(4)
.k(k)
.a_stride(11)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, k_gt_1_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -99199,13 +99199,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, n_gt_4) {
+TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, n_gt_4) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -99216,12 +99216,12 @@
.m(4)
.n(4)
.k(k)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, n_gt_4_strided_cn) {
+TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, n_gt_4_strided_cn) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -99233,12 +99233,12 @@
.n(4)
.k(k)
.cn_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, n_gt_4_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, n_gt_4_strided_a) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -99250,12 +99250,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, n_gt_4_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, n_gt_4_subtile) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
@@ -99268,13 +99268,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, n_div_4) {
+TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, n_div_4) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -99285,12 +99285,12 @@
.m(4)
.n(4)
.k(k)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, n_div_4_strided_cn) {
+TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, n_div_4_strided_cn) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -99302,12 +99302,12 @@
.n(n)
.k(k)
.cn_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, n_div_4_strided_a) {
+TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, n_div_4_strided_a) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -99319,12 +99319,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, n_div_4_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, n_div_4_subtile) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
@@ -99337,13 +99337,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, strided_cm_subtile) {
+TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -99357,13 +99357,13 @@
.k(k)
.cm_stride(7)
.iterations(1)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, qmin) {
+TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, qmin) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -99373,10 +99373,10 @@
.n(4)
.k(1)
.qmin(128)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, qmax) {
+TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, qmax) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -99386,10 +99386,10 @@
.n(4)
.k(1)
.qmax(128)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, strided_cm) {
+TEST(QS8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, strided_cm) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -99399,5 +99399,5 @@
.n(4)
.k(1)
.cm_stride(7)
- .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
diff --git a/test/qs8-gemm-minmax-fp32.yaml b/test/qs8-gemm-minmax-fp32.yaml
index 9057e58..96a0baa 100644
--- a/test/qs8-gemm-minmax-fp32.yaml
+++ b/test/qs8-gemm-minmax-fp32.yaml
@@ -650,51 +650,51 @@
- name: xnn_qs8_gemm_xw_minmax_fp32_ukernel_3x4c8__wasmsimd_mul16
init: xnn_init_qs8_conv_minmax_fp32_wasmsimd_params
k-block: 8
-- name: xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint
- init: xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params
+- name: xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf
+ init: xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params
k-block: 1
-- name: xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint
- init: xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params
+- name: xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf
+ init: xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params
k-block: 1
-- name: xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint
- init: xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params
+- name: xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf
+ init: xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params
k-block: 1
-- name: xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint
- init: xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params
+- name: xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf
+ init: xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params
k-block: 1
-- name: xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint
- init: xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params
+- name: xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf
+ init: xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params
k-block: 1
-- name: xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint
- init: xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params
+- name: xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf
+ init: xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params
k-block: 1
-- name: xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint
- init: xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params
+- name: xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf
+ init: xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params
k-block: 1
-- name: xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint
- init: xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params
+- name: xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf
+ init: xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params
k-block: 1
-- name: xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_magic
- init: xnn_init_qs8_conv_minmax_fp32_scalar_magic_params
+- name: xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic
+ init: xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params
k-block: 1
-- name: xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_magic
- init: xnn_init_qs8_conv_minmax_fp32_scalar_magic_params
+- name: xnn_qs8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic
+ init: xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params
k-block: 1
-- name: xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_magic
- init: xnn_init_qs8_conv_minmax_fp32_scalar_magic_params
+- name: xnn_qs8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic
+ init: xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params
k-block: 1
-- name: xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_magic
- init: xnn_init_qs8_conv_minmax_fp32_scalar_magic_params
+- name: xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic
+ init: xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params
k-block: 1
-- name: xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_magic
- init: xnn_init_qs8_conv_minmax_fp32_scalar_magic_params
+- name: xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic
+ init: xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params
k-block: 1
-- name: xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_magic
- init: xnn_init_qs8_conv_minmax_fp32_scalar_magic_params
+- name: xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic
+ init: xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params
k-block: 1
-- name: xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_magic
- init: xnn_init_qs8_conv_minmax_fp32_scalar_magic_params
+- name: xnn_qs8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic
+ init: xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params
k-block: 1
-- name: xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_magic
- init: xnn_init_qs8_conv_minmax_fp32_scalar_magic_params
+- name: xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic
+ init: xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params
k-block: 1
diff --git a/test/qs8-gemm-minmax-rndnu.cc b/test/qs8-gemm-minmax-rndnu.cc
index 9081cc3..3075018 100644
--- a/test/qs8-gemm-minmax-rndnu.cc
+++ b/test/qs8-gemm-minmax-rndnu.cc
@@ -104445,2651 +104445,3 @@
.Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16__neon_mull_addw_dup, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
}
#endif // XNN_ARCH_ARM || XNN_ARCH_ARM64
-
-
-TEST(QS8_GEMM_MINMAX_RNDNU_1X2__SCALAR, k_eq_1) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(1)
- .n(2)
- .k(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_1X2__SCALAR, strided_cn) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(1)
- .n(2)
- .k(1)
- .cn_stride(5)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_1X2__SCALAR, k_eq_1_strided_a) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(1)
- .n(2)
- .k(1)
- .a_stride(3)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_1X2__SCALAR, k_eq_1_subtile) {
- for (uint32_t m = 1; m <= 1; m++) {
- for (uint32_t n = 1; n <= 2; n++) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(1)
- .iterations(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_1X2__SCALAR, k_eq_1_subtile_m) {
- for (uint32_t m = 1; m <= 1; m++) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(m)
- .n(2)
- .k(1)
- .iterations(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_1X2__SCALAR, k_eq_1_subtile_n) {
- for (uint32_t n = 1; n <= 2; n++) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(1)
- .n(n)
- .k(1)
- .iterations(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_1X2__SCALAR, k_gt_1) {
- for (size_t k = 2; k < 10; k++) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(1)
- .n(2)
- .k(k)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_1X2__SCALAR, k_gt_1_strided_a) {
- for (size_t k = 2; k < 10; k++) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(1)
- .n(2)
- .k(k)
- .a_stride(11)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_1X2__SCALAR, k_gt_1_subtile) {
- for (size_t k = 2; k < 10; k++) {
- for (uint32_t m = 1; m <= 1; m++) {
- for (uint32_t n = 1; n <= 2; n++) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .iterations(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_1X2__SCALAR, n_gt_2) {
- for (uint32_t n = 3; n < 4; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(1)
- .n(2)
- .k(k)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_1X2__SCALAR, n_gt_2_strided_cn) {
- for (uint32_t n = 3; n < 4; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(1)
- .n(2)
- .k(k)
- .cn_stride(5)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_1X2__SCALAR, n_gt_2_strided_a) {
- for (uint32_t n = 3; n < 4; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(1)
- .n(n)
- .k(k)
- .a_stride(7)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_1X2__SCALAR, n_gt_2_subtile) {
- for (uint32_t n = 3; n < 4; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- for (uint32_t m = 1; m <= 1; m++) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .iterations(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_1X2__SCALAR, n_div_2) {
- for (uint32_t n = 4; n <= 6; n += 2) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(1)
- .n(2)
- .k(k)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_1X2__SCALAR, n_div_2_strided_cn) {
- for (uint32_t n = 4; n <= 6; n += 2) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(1)
- .n(n)
- .k(k)
- .cn_stride(5)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_1X2__SCALAR, n_div_2_strided_a) {
- for (uint32_t n = 4; n <= 6; n += 2) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(1)
- .n(n)
- .k(k)
- .a_stride(7)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_1X2__SCALAR, n_div_2_subtile) {
- for (uint32_t n = 4; n <= 6; n += 2) {
- for (size_t k = 1; k <= 5; k += 2) {
- for (uint32_t m = 1; m <= 1; m++) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .iterations(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_1X2__SCALAR, strided_cm_subtile) {
- for (size_t k = 1; k <= 5; k += 2) {
- for (uint32_t m = 1; m <= 1; m++) {
- for (uint32_t n = 1; n <= 2; n++) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .cm_stride(5)
- .iterations(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_1X2__SCALAR, qmin) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(1)
- .n(2)
- .k(1)
- .qmin(128)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_1X2__SCALAR, qmax) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(1)
- .n(2)
- .k(1)
- .qmax(128)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_1X2__SCALAR, strided_cm) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(1)
- .n(2)
- .k(1)
- .cm_stride(5)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-
-TEST(QS8_GEMM_MINMAX_RNDNU_2X2__SCALAR, k_eq_1) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(2)
- .n(2)
- .k(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_2X2__SCALAR, strided_cn) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(2)
- .n(2)
- .k(1)
- .cn_stride(5)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_2X2__SCALAR, k_eq_1_strided_a) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(2)
- .n(2)
- .k(1)
- .a_stride(3)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_2X2__SCALAR, k_eq_1_subtile) {
- for (uint32_t m = 1; m <= 2; m++) {
- for (uint32_t n = 1; n <= 2; n++) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(1)
- .iterations(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_2X2__SCALAR, k_eq_1_subtile_m) {
- for (uint32_t m = 1; m <= 2; m++) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(m)
- .n(2)
- .k(1)
- .iterations(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_2X2__SCALAR, k_eq_1_subtile_n) {
- for (uint32_t n = 1; n <= 2; n++) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(2)
- .n(n)
- .k(1)
- .iterations(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_2X2__SCALAR, k_gt_1) {
- for (size_t k = 2; k < 10; k++) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(2)
- .n(2)
- .k(k)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_2X2__SCALAR, k_gt_1_strided_a) {
- for (size_t k = 2; k < 10; k++) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(2)
- .n(2)
- .k(k)
- .a_stride(11)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_2X2__SCALAR, k_gt_1_subtile) {
- for (size_t k = 2; k < 10; k++) {
- for (uint32_t m = 1; m <= 2; m++) {
- for (uint32_t n = 1; n <= 2; n++) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .iterations(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_2X2__SCALAR, n_gt_2) {
- for (uint32_t n = 3; n < 4; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(2)
- .n(2)
- .k(k)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_2X2__SCALAR, n_gt_2_strided_cn) {
- for (uint32_t n = 3; n < 4; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(2)
- .n(2)
- .k(k)
- .cn_stride(5)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_2X2__SCALAR, n_gt_2_strided_a) {
- for (uint32_t n = 3; n < 4; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(2)
- .n(n)
- .k(k)
- .a_stride(7)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_2X2__SCALAR, n_gt_2_subtile) {
- for (uint32_t n = 3; n < 4; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- for (uint32_t m = 1; m <= 2; m++) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .iterations(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_2X2__SCALAR, n_div_2) {
- for (uint32_t n = 4; n <= 6; n += 2) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(2)
- .n(2)
- .k(k)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_2X2__SCALAR, n_div_2_strided_cn) {
- for (uint32_t n = 4; n <= 6; n += 2) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(2)
- .n(n)
- .k(k)
- .cn_stride(5)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_2X2__SCALAR, n_div_2_strided_a) {
- for (uint32_t n = 4; n <= 6; n += 2) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(2)
- .n(n)
- .k(k)
- .a_stride(7)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_2X2__SCALAR, n_div_2_subtile) {
- for (uint32_t n = 4; n <= 6; n += 2) {
- for (size_t k = 1; k <= 5; k += 2) {
- for (uint32_t m = 1; m <= 2; m++) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .iterations(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_2X2__SCALAR, strided_cm_subtile) {
- for (size_t k = 1; k <= 5; k += 2) {
- for (uint32_t m = 1; m <= 2; m++) {
- for (uint32_t n = 1; n <= 2; n++) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .cm_stride(5)
- .iterations(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_2X2__SCALAR, qmin) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(2)
- .n(2)
- .k(1)
- .qmin(128)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_2X2__SCALAR, qmax) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(2)
- .n(2)
- .k(1)
- .qmax(128)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_2X2__SCALAR, strided_cm) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(2)
- .n(2)
- .k(1)
- .cm_stride(5)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-
-TEST(QS8_GEMM_MINMAX_RNDNU_3X2__SCALAR, k_eq_1) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(3)
- .n(2)
- .k(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_3X2__SCALAR, strided_cn) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(3)
- .n(2)
- .k(1)
- .cn_stride(5)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_3X2__SCALAR, k_eq_1_strided_a) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(3)
- .n(2)
- .k(1)
- .a_stride(3)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_3X2__SCALAR, k_eq_1_subtile) {
- for (uint32_t m = 1; m <= 3; m++) {
- for (uint32_t n = 1; n <= 2; n++) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(1)
- .iterations(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_3X2__SCALAR, k_eq_1_subtile_m) {
- for (uint32_t m = 1; m <= 3; m++) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(m)
- .n(2)
- .k(1)
- .iterations(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_3X2__SCALAR, k_eq_1_subtile_n) {
- for (uint32_t n = 1; n <= 2; n++) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(3)
- .n(n)
- .k(1)
- .iterations(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_3X2__SCALAR, k_gt_1) {
- for (size_t k = 2; k < 10; k++) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(3)
- .n(2)
- .k(k)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_3X2__SCALAR, k_gt_1_strided_a) {
- for (size_t k = 2; k < 10; k++) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(3)
- .n(2)
- .k(k)
- .a_stride(11)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_3X2__SCALAR, k_gt_1_subtile) {
- for (size_t k = 2; k < 10; k++) {
- for (uint32_t m = 1; m <= 3; m++) {
- for (uint32_t n = 1; n <= 2; n++) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .iterations(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_3X2__SCALAR, n_gt_2) {
- for (uint32_t n = 3; n < 4; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(3)
- .n(2)
- .k(k)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_3X2__SCALAR, n_gt_2_strided_cn) {
- for (uint32_t n = 3; n < 4; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(3)
- .n(2)
- .k(k)
- .cn_stride(5)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_3X2__SCALAR, n_gt_2_strided_a) {
- for (uint32_t n = 3; n < 4; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(3)
- .n(n)
- .k(k)
- .a_stride(7)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_3X2__SCALAR, n_gt_2_subtile) {
- for (uint32_t n = 3; n < 4; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- for (uint32_t m = 1; m <= 3; m++) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .iterations(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_3X2__SCALAR, n_div_2) {
- for (uint32_t n = 4; n <= 6; n += 2) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(3)
- .n(2)
- .k(k)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_3X2__SCALAR, n_div_2_strided_cn) {
- for (uint32_t n = 4; n <= 6; n += 2) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(3)
- .n(n)
- .k(k)
- .cn_stride(5)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_3X2__SCALAR, n_div_2_strided_a) {
- for (uint32_t n = 4; n <= 6; n += 2) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(3)
- .n(n)
- .k(k)
- .a_stride(7)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_3X2__SCALAR, n_div_2_subtile) {
- for (uint32_t n = 4; n <= 6; n += 2) {
- for (size_t k = 1; k <= 5; k += 2) {
- for (uint32_t m = 1; m <= 3; m++) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .iterations(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_3X2__SCALAR, strided_cm_subtile) {
- for (size_t k = 1; k <= 5; k += 2) {
- for (uint32_t m = 1; m <= 3; m++) {
- for (uint32_t n = 1; n <= 2; n++) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .cm_stride(5)
- .iterations(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_3X2__SCALAR, qmin) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(3)
- .n(2)
- .k(1)
- .qmin(128)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_3X2__SCALAR, qmax) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(3)
- .n(2)
- .k(1)
- .qmax(128)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_3X2__SCALAR, strided_cm) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(3)
- .n(2)
- .k(1)
- .cm_stride(5)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-
-TEST(QS8_GEMM_MINMAX_RNDNU_4X2__SCALAR, k_eq_1) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(4)
- .n(2)
- .k(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_4X2__SCALAR, strided_cn) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(4)
- .n(2)
- .k(1)
- .cn_stride(5)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_4X2__SCALAR, k_eq_1_strided_a) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(4)
- .n(2)
- .k(1)
- .a_stride(3)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_4X2__SCALAR, k_eq_1_subtile) {
- for (uint32_t m = 1; m <= 4; m++) {
- for (uint32_t n = 1; n <= 2; n++) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(1)
- .iterations(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_4X2__SCALAR, k_eq_1_subtile_m) {
- for (uint32_t m = 1; m <= 4; m++) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(m)
- .n(2)
- .k(1)
- .iterations(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_4X2__SCALAR, k_eq_1_subtile_n) {
- for (uint32_t n = 1; n <= 2; n++) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(4)
- .n(n)
- .k(1)
- .iterations(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_4X2__SCALAR, k_gt_1) {
- for (size_t k = 2; k < 10; k++) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(4)
- .n(2)
- .k(k)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_4X2__SCALAR, k_gt_1_strided_a) {
- for (size_t k = 2; k < 10; k++) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(4)
- .n(2)
- .k(k)
- .a_stride(11)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_4X2__SCALAR, k_gt_1_subtile) {
- for (size_t k = 2; k < 10; k++) {
- for (uint32_t m = 1; m <= 4; m++) {
- for (uint32_t n = 1; n <= 2; n++) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .iterations(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_4X2__SCALAR, n_gt_2) {
- for (uint32_t n = 3; n < 4; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(4)
- .n(2)
- .k(k)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_4X2__SCALAR, n_gt_2_strided_cn) {
- for (uint32_t n = 3; n < 4; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(4)
- .n(2)
- .k(k)
- .cn_stride(5)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_4X2__SCALAR, n_gt_2_strided_a) {
- for (uint32_t n = 3; n < 4; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(4)
- .n(n)
- .k(k)
- .a_stride(7)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_4X2__SCALAR, n_gt_2_subtile) {
- for (uint32_t n = 3; n < 4; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- for (uint32_t m = 1; m <= 4; m++) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .iterations(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_4X2__SCALAR, n_div_2) {
- for (uint32_t n = 4; n <= 6; n += 2) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(4)
- .n(2)
- .k(k)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_4X2__SCALAR, n_div_2_strided_cn) {
- for (uint32_t n = 4; n <= 6; n += 2) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(4)
- .n(n)
- .k(k)
- .cn_stride(5)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_4X2__SCALAR, n_div_2_strided_a) {
- for (uint32_t n = 4; n <= 6; n += 2) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(4)
- .n(n)
- .k(k)
- .a_stride(7)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_4X2__SCALAR, n_div_2_subtile) {
- for (uint32_t n = 4; n <= 6; n += 2) {
- for (size_t k = 1; k <= 5; k += 2) {
- for (uint32_t m = 1; m <= 4; m++) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .iterations(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_4X2__SCALAR, strided_cm_subtile) {
- for (size_t k = 1; k <= 5; k += 2) {
- for (uint32_t m = 1; m <= 4; m++) {
- for (uint32_t n = 1; n <= 2; n++) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .cm_stride(5)
- .iterations(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_4X2__SCALAR, qmin) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(4)
- .n(2)
- .k(1)
- .qmin(128)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_4X2__SCALAR, qmax) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(4)
- .n(2)
- .k(1)
- .qmax(128)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_4X2__SCALAR, strided_cm) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(4)
- .n(2)
- .k(1)
- .cm_stride(5)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-
-TEST(QS8_GEMM_MINMAX_RNDNU_1X4__SCALAR, k_eq_1) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(1)
- .n(4)
- .k(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_1X4__SCALAR, strided_cn) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(1)
- .n(4)
- .k(1)
- .cn_stride(7)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_1X4__SCALAR, k_eq_1_strided_a) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(1)
- .n(4)
- .k(1)
- .a_stride(3)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_1X4__SCALAR, k_eq_1_subtile) {
- for (uint32_t m = 1; m <= 1; m++) {
- for (uint32_t n = 1; n <= 4; n++) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(1)
- .iterations(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_1X4__SCALAR, k_eq_1_subtile_m) {
- for (uint32_t m = 1; m <= 1; m++) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(m)
- .n(4)
- .k(1)
- .iterations(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_1X4__SCALAR, k_eq_1_subtile_n) {
- for (uint32_t n = 1; n <= 4; n++) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(1)
- .n(n)
- .k(1)
- .iterations(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_1X4__SCALAR, k_gt_1) {
- for (size_t k = 2; k < 10; k++) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(1)
- .n(4)
- .k(k)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_1X4__SCALAR, k_gt_1_strided_a) {
- for (size_t k = 2; k < 10; k++) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(1)
- .n(4)
- .k(k)
- .a_stride(11)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_1X4__SCALAR, k_gt_1_subtile) {
- for (size_t k = 2; k < 10; k++) {
- for (uint32_t m = 1; m <= 1; m++) {
- for (uint32_t n = 1; n <= 4; n++) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .iterations(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_1X4__SCALAR, n_gt_4) {
- for (uint32_t n = 5; n < 8; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(1)
- .n(4)
- .k(k)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_1X4__SCALAR, n_gt_4_strided_cn) {
- for (uint32_t n = 5; n < 8; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(1)
- .n(4)
- .k(k)
- .cn_stride(7)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_1X4__SCALAR, n_gt_4_strided_a) {
- for (uint32_t n = 5; n < 8; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(1)
- .n(n)
- .k(k)
- .a_stride(7)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_1X4__SCALAR, n_gt_4_subtile) {
- for (uint32_t n = 5; n < 8; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- for (uint32_t m = 1; m <= 1; m++) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .iterations(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_1X4__SCALAR, n_div_4) {
- for (uint32_t n = 8; n <= 12; n += 4) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(1)
- .n(4)
- .k(k)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_1X4__SCALAR, n_div_4_strided_cn) {
- for (uint32_t n = 8; n <= 12; n += 4) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(1)
- .n(n)
- .k(k)
- .cn_stride(7)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_1X4__SCALAR, n_div_4_strided_a) {
- for (uint32_t n = 8; n <= 12; n += 4) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(1)
- .n(n)
- .k(k)
- .a_stride(7)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_1X4__SCALAR, n_div_4_subtile) {
- for (uint32_t n = 8; n <= 12; n += 4) {
- for (size_t k = 1; k <= 5; k += 2) {
- for (uint32_t m = 1; m <= 1; m++) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .iterations(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_1X4__SCALAR, strided_cm_subtile) {
- for (size_t k = 1; k <= 5; k += 2) {
- for (uint32_t m = 1; m <= 1; m++) {
- for (uint32_t n = 1; n <= 4; n++) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .cm_stride(7)
- .iterations(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_1X4__SCALAR, qmin) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(1)
- .n(4)
- .k(1)
- .qmin(128)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_1X4__SCALAR, qmax) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(1)
- .n(4)
- .k(1)
- .qmax(128)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_1X4__SCALAR, strided_cm) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(1)
- .n(4)
- .k(1)
- .cm_stride(7)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-
-TEST(QS8_GEMM_MINMAX_RNDNU_2X4__SCALAR, k_eq_1) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(2)
- .n(4)
- .k(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_2X4__SCALAR, strided_cn) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(2)
- .n(4)
- .k(1)
- .cn_stride(7)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_2X4__SCALAR, k_eq_1_strided_a) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(2)
- .n(4)
- .k(1)
- .a_stride(3)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_2X4__SCALAR, k_eq_1_subtile) {
- for (uint32_t m = 1; m <= 2; m++) {
- for (uint32_t n = 1; n <= 4; n++) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(1)
- .iterations(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_2X4__SCALAR, k_eq_1_subtile_m) {
- for (uint32_t m = 1; m <= 2; m++) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(m)
- .n(4)
- .k(1)
- .iterations(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_2X4__SCALAR, k_eq_1_subtile_n) {
- for (uint32_t n = 1; n <= 4; n++) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(2)
- .n(n)
- .k(1)
- .iterations(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_2X4__SCALAR, k_gt_1) {
- for (size_t k = 2; k < 10; k++) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(2)
- .n(4)
- .k(k)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_2X4__SCALAR, k_gt_1_strided_a) {
- for (size_t k = 2; k < 10; k++) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(2)
- .n(4)
- .k(k)
- .a_stride(11)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_2X4__SCALAR, k_gt_1_subtile) {
- for (size_t k = 2; k < 10; k++) {
- for (uint32_t m = 1; m <= 2; m++) {
- for (uint32_t n = 1; n <= 4; n++) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .iterations(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_2X4__SCALAR, n_gt_4) {
- for (uint32_t n = 5; n < 8; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(2)
- .n(4)
- .k(k)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_2X4__SCALAR, n_gt_4_strided_cn) {
- for (uint32_t n = 5; n < 8; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(2)
- .n(4)
- .k(k)
- .cn_stride(7)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_2X4__SCALAR, n_gt_4_strided_a) {
- for (uint32_t n = 5; n < 8; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(2)
- .n(n)
- .k(k)
- .a_stride(7)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_2X4__SCALAR, n_gt_4_subtile) {
- for (uint32_t n = 5; n < 8; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- for (uint32_t m = 1; m <= 2; m++) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .iterations(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_2X4__SCALAR, n_div_4) {
- for (uint32_t n = 8; n <= 12; n += 4) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(2)
- .n(4)
- .k(k)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_2X4__SCALAR, n_div_4_strided_cn) {
- for (uint32_t n = 8; n <= 12; n += 4) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(2)
- .n(n)
- .k(k)
- .cn_stride(7)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_2X4__SCALAR, n_div_4_strided_a) {
- for (uint32_t n = 8; n <= 12; n += 4) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(2)
- .n(n)
- .k(k)
- .a_stride(7)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_2X4__SCALAR, n_div_4_subtile) {
- for (uint32_t n = 8; n <= 12; n += 4) {
- for (size_t k = 1; k <= 5; k += 2) {
- for (uint32_t m = 1; m <= 2; m++) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .iterations(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_2X4__SCALAR, strided_cm_subtile) {
- for (size_t k = 1; k <= 5; k += 2) {
- for (uint32_t m = 1; m <= 2; m++) {
- for (uint32_t n = 1; n <= 4; n++) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .cm_stride(7)
- .iterations(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_2X4__SCALAR, qmin) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(2)
- .n(4)
- .k(1)
- .qmin(128)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_2X4__SCALAR, qmax) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(2)
- .n(4)
- .k(1)
- .qmax(128)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_2X4__SCALAR, strided_cm) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(2)
- .n(4)
- .k(1)
- .cm_stride(7)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-
-TEST(QS8_GEMM_MINMAX_RNDNU_3X4__SCALAR, k_eq_1) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(3)
- .n(4)
- .k(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_3X4__SCALAR, strided_cn) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(3)
- .n(4)
- .k(1)
- .cn_stride(7)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_3X4__SCALAR, k_eq_1_strided_a) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(3)
- .n(4)
- .k(1)
- .a_stride(3)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_3X4__SCALAR, k_eq_1_subtile) {
- for (uint32_t m = 1; m <= 3; m++) {
- for (uint32_t n = 1; n <= 4; n++) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(1)
- .iterations(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_3X4__SCALAR, k_eq_1_subtile_m) {
- for (uint32_t m = 1; m <= 3; m++) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(m)
- .n(4)
- .k(1)
- .iterations(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_3X4__SCALAR, k_eq_1_subtile_n) {
- for (uint32_t n = 1; n <= 4; n++) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(3)
- .n(n)
- .k(1)
- .iterations(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_3X4__SCALAR, k_gt_1) {
- for (size_t k = 2; k < 10; k++) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(3)
- .n(4)
- .k(k)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_3X4__SCALAR, k_gt_1_strided_a) {
- for (size_t k = 2; k < 10; k++) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(3)
- .n(4)
- .k(k)
- .a_stride(11)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_3X4__SCALAR, k_gt_1_subtile) {
- for (size_t k = 2; k < 10; k++) {
- for (uint32_t m = 1; m <= 3; m++) {
- for (uint32_t n = 1; n <= 4; n++) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .iterations(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_3X4__SCALAR, n_gt_4) {
- for (uint32_t n = 5; n < 8; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(3)
- .n(4)
- .k(k)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_3X4__SCALAR, n_gt_4_strided_cn) {
- for (uint32_t n = 5; n < 8; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(3)
- .n(4)
- .k(k)
- .cn_stride(7)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_3X4__SCALAR, n_gt_4_strided_a) {
- for (uint32_t n = 5; n < 8; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(3)
- .n(n)
- .k(k)
- .a_stride(7)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_3X4__SCALAR, n_gt_4_subtile) {
- for (uint32_t n = 5; n < 8; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- for (uint32_t m = 1; m <= 3; m++) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .iterations(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_3X4__SCALAR, n_div_4) {
- for (uint32_t n = 8; n <= 12; n += 4) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(3)
- .n(4)
- .k(k)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_3X4__SCALAR, n_div_4_strided_cn) {
- for (uint32_t n = 8; n <= 12; n += 4) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(3)
- .n(n)
- .k(k)
- .cn_stride(7)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_3X4__SCALAR, n_div_4_strided_a) {
- for (uint32_t n = 8; n <= 12; n += 4) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(3)
- .n(n)
- .k(k)
- .a_stride(7)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_3X4__SCALAR, n_div_4_subtile) {
- for (uint32_t n = 8; n <= 12; n += 4) {
- for (size_t k = 1; k <= 5; k += 2) {
- for (uint32_t m = 1; m <= 3; m++) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .iterations(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_3X4__SCALAR, strided_cm_subtile) {
- for (size_t k = 1; k <= 5; k += 2) {
- for (uint32_t m = 1; m <= 3; m++) {
- for (uint32_t n = 1; n <= 4; n++) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .cm_stride(7)
- .iterations(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_3X4__SCALAR, qmin) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(3)
- .n(4)
- .k(1)
- .qmin(128)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_3X4__SCALAR, qmax) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(3)
- .n(4)
- .k(1)
- .qmax(128)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_3X4__SCALAR, strided_cm) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(3)
- .n(4)
- .k(1)
- .cm_stride(7)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-
-TEST(QS8_GEMM_MINMAX_RNDNU_4X4__SCALAR, k_eq_1) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(4)
- .n(4)
- .k(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_4X4__SCALAR, strided_cn) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(4)
- .n(4)
- .k(1)
- .cn_stride(7)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_4X4__SCALAR, k_eq_1_strided_a) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(4)
- .n(4)
- .k(1)
- .a_stride(3)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_4X4__SCALAR, k_eq_1_subtile) {
- for (uint32_t m = 1; m <= 4; m++) {
- for (uint32_t n = 1; n <= 4; n++) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(1)
- .iterations(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_4X4__SCALAR, k_eq_1_subtile_m) {
- for (uint32_t m = 1; m <= 4; m++) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(m)
- .n(4)
- .k(1)
- .iterations(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_4X4__SCALAR, k_eq_1_subtile_n) {
- for (uint32_t n = 1; n <= 4; n++) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(4)
- .n(n)
- .k(1)
- .iterations(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_4X4__SCALAR, k_gt_1) {
- for (size_t k = 2; k < 10; k++) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(4)
- .n(4)
- .k(k)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_4X4__SCALAR, k_gt_1_strided_a) {
- for (size_t k = 2; k < 10; k++) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(4)
- .n(4)
- .k(k)
- .a_stride(11)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_4X4__SCALAR, k_gt_1_subtile) {
- for (size_t k = 2; k < 10; k++) {
- for (uint32_t m = 1; m <= 4; m++) {
- for (uint32_t n = 1; n <= 4; n++) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .iterations(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_4X4__SCALAR, n_gt_4) {
- for (uint32_t n = 5; n < 8; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(4)
- .n(4)
- .k(k)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_4X4__SCALAR, n_gt_4_strided_cn) {
- for (uint32_t n = 5; n < 8; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(4)
- .n(4)
- .k(k)
- .cn_stride(7)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_4X4__SCALAR, n_gt_4_strided_a) {
- for (uint32_t n = 5; n < 8; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(4)
- .n(n)
- .k(k)
- .a_stride(7)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_4X4__SCALAR, n_gt_4_subtile) {
- for (uint32_t n = 5; n < 8; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- for (uint32_t m = 1; m <= 4; m++) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .iterations(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_4X4__SCALAR, n_div_4) {
- for (uint32_t n = 8; n <= 12; n += 4) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(4)
- .n(4)
- .k(k)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_4X4__SCALAR, n_div_4_strided_cn) {
- for (uint32_t n = 8; n <= 12; n += 4) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(4)
- .n(n)
- .k(k)
- .cn_stride(7)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_4X4__SCALAR, n_div_4_strided_a) {
- for (uint32_t n = 8; n <= 12; n += 4) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(4)
- .n(n)
- .k(k)
- .a_stride(7)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_4X4__SCALAR, n_div_4_subtile) {
- for (uint32_t n = 8; n <= 12; n += 4) {
- for (size_t k = 1; k <= 5; k += 2) {
- for (uint32_t m = 1; m <= 4; m++) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .iterations(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_4X4__SCALAR, strided_cm_subtile) {
- for (size_t k = 1; k <= 5; k += 2) {
- for (uint32_t m = 1; m <= 4; m++) {
- for (uint32_t n = 1; n <= 4; n++) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .cm_stride(7)
- .iterations(1)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_4X4__SCALAR, qmin) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(4)
- .n(4)
- .k(1)
- .qmin(128)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_4X4__SCALAR, qmax) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(4)
- .n(4)
- .k(1)
- .qmax(128)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_GEMM_MINMAX_RNDNU_4X4__SCALAR, strided_cm) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(4)
- .n(4)
- .k(1)
- .cm_stride(7)
- .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
diff --git a/test/qs8-gemm-minmax-rndnu.yaml b/test/qs8-gemm-minmax-rndnu.yaml
index 7596a77..a70d5de 100644
--- a/test/qs8-gemm-minmax-rndnu.yaml
+++ b/test/qs8-gemm-minmax-rndnu.yaml
@@ -715,27 +715,3 @@
- name: xnn_qs8_gemm_minmax_rndnu_ukernel_4x16__neon_mull_addw_dup
init: xnn_init_qs8_conv_minmax_rndnu_neon_params
k-block: 8
-- name: xnn_qs8_gemm_minmax_rndnu_ukernel_1x2__scalar
- init: xnn_init_qs8_conv_minmax_rndnu_scalar_params
- k-block: 1
-- name: xnn_qs8_gemm_minmax_rndnu_ukernel_2x2__scalar
- init: xnn_init_qs8_conv_minmax_rndnu_scalar_params
- k-block: 1
-- name: xnn_qs8_gemm_minmax_rndnu_ukernel_3x2__scalar
- init: xnn_init_qs8_conv_minmax_rndnu_scalar_params
- k-block: 1
-- name: xnn_qs8_gemm_minmax_rndnu_ukernel_4x2__scalar
- init: xnn_init_qs8_conv_minmax_rndnu_scalar_params
- k-block: 1
-- name: xnn_qs8_gemm_minmax_rndnu_ukernel_1x4__scalar
- init: xnn_init_qs8_conv_minmax_rndnu_scalar_params
- k-block: 1
-- name: xnn_qs8_gemm_minmax_rndnu_ukernel_2x4__scalar
- init: xnn_init_qs8_conv_minmax_rndnu_scalar_params
- k-block: 1
-- name: xnn_qs8_gemm_minmax_rndnu_ukernel_3x4__scalar
- init: xnn_init_qs8_conv_minmax_rndnu_scalar_params
- k-block: 1
-- name: xnn_qs8_gemm_minmax_rndnu_ukernel_4x4__scalar
- init: xnn_init_qs8_conv_minmax_rndnu_scalar_params
- k-block: 1
diff --git a/test/qs8-igemm-minmax-fp32.cc b/test/qs8-igemm-minmax-fp32.cc
index ff9d647..69350b7 100644
--- a/test/qs8-igemm-minmax-fp32.cc
+++ b/test/qs8-igemm-minmax-fp32.cc
@@ -74093,7 +74093,7 @@
#endif // XNN_ARCH_WASMSIMD || XNN_ARCH_WASMRELAXEDSIMD
-TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, k_eq_1) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, k_eq_1) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -74102,10 +74102,10 @@
.m(1)
.n(2)
.k(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, strided_cn) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, strided_cn) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -74115,10 +74115,10 @@
.n(2)
.k(1)
.cn_stride(5)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, k_eq_1_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
@@ -74130,12 +74130,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, k_eq_1_subtile_m) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 1; m++) {
GemmMicrokernelTester()
.mr(1)
@@ -74146,11 +74146,11 @@
.n(2)
.k(1)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, k_eq_1_subtile_n) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
.mr(1)
@@ -74161,11 +74161,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, k_gt_1) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(1)
@@ -74175,11 +74175,11 @@
.m(1)
.n(2)
.k(k)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, k_gt_1_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -74192,13 +74192,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, n_gt_2) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, n_gt_2) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -74209,12 +74209,12 @@
.m(1)
.n(2)
.k(k)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, n_gt_2_strided_cn) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, n_gt_2_strided_cn) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -74226,12 +74226,12 @@
.n(2)
.k(k)
.cn_stride(5)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, n_gt_2_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, n_gt_2_subtile) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
@@ -74244,13 +74244,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, n_div_2) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, n_div_2) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -74261,12 +74261,12 @@
.m(1)
.n(2)
.k(k)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, n_div_2_strided_cn) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, n_div_2_strided_cn) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -74278,12 +74278,12 @@
.n(n)
.k(k)
.cn_stride(5)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, n_div_2_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, n_div_2_subtile) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
@@ -74296,13 +74296,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, small_kernel) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, small_kernel) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(1)
@@ -74313,11 +74313,11 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, small_kernel_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, small_kernel_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -74331,13 +74331,13 @@
.k(k)
.ks(3)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, n_gt_2_small_kernel) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, n_gt_2_small_kernel) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -74349,12 +74349,12 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, n_div_2_small_kernel) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, n_div_2_small_kernel) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -74366,12 +74366,12 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, strided_cm_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -74385,13 +74385,13 @@
.k(k)
.cm_stride(5)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, a_offset) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, a_offset) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(1)
@@ -74403,11 +74403,11 @@
.k(k)
.ks(3)
.a_offset(7)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, zero) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, zero) {
for (uint32_t mz = 0; mz < 1; mz++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -74421,12 +74421,12 @@
.ks(3)
.a_offset(7)
.zero_index(mz)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, qmin) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, qmin) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -74436,10 +74436,10 @@
.n(2)
.k(1)
.qmin(128)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, qmax) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, qmax) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -74449,10 +74449,10 @@
.n(2)
.k(1)
.qmax(128)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, strided_cm) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, strided_cm) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -74462,11 +74462,11 @@
.n(2)
.k(1)
.cm_stride(5)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, k_eq_1) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, k_eq_1) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -74475,10 +74475,10 @@
.m(2)
.n(2)
.k(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, strided_cn) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, strided_cn) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -74488,10 +74488,10 @@
.n(2)
.k(1)
.cn_stride(5)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, k_eq_1_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
@@ -74503,12 +74503,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, k_eq_1_subtile_m) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 2; m++) {
GemmMicrokernelTester()
.mr(2)
@@ -74519,11 +74519,11 @@
.n(2)
.k(1)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, k_eq_1_subtile_n) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
.mr(2)
@@ -74534,11 +74534,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, k_gt_1) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(2)
@@ -74548,11 +74548,11 @@
.m(2)
.n(2)
.k(k)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, k_gt_1_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -74565,13 +74565,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, n_gt_2) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, n_gt_2) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -74582,12 +74582,12 @@
.m(2)
.n(2)
.k(k)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, n_gt_2_strided_cn) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, n_gt_2_strided_cn) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -74599,12 +74599,12 @@
.n(2)
.k(k)
.cn_stride(5)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, n_gt_2_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, n_gt_2_subtile) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
@@ -74617,13 +74617,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, n_div_2) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, n_div_2) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -74634,12 +74634,12 @@
.m(2)
.n(2)
.k(k)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, n_div_2_strided_cn) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, n_div_2_strided_cn) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -74651,12 +74651,12 @@
.n(n)
.k(k)
.cn_stride(5)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, n_div_2_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, n_div_2_subtile) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
@@ -74669,13 +74669,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, small_kernel) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, small_kernel) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(2)
@@ -74686,11 +74686,11 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, small_kernel_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, small_kernel_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -74704,13 +74704,13 @@
.k(k)
.ks(3)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, n_gt_2_small_kernel) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, n_gt_2_small_kernel) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -74722,12 +74722,12 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, n_div_2_small_kernel) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, n_div_2_small_kernel) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -74739,12 +74739,12 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, strided_cm_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -74758,13 +74758,13 @@
.k(k)
.cm_stride(5)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, a_offset) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, a_offset) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(2)
@@ -74776,11 +74776,11 @@
.k(k)
.ks(3)
.a_offset(13)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, zero) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, zero) {
for (uint32_t mz = 0; mz < 2; mz++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -74794,12 +74794,12 @@
.ks(3)
.a_offset(13)
.zero_index(mz)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, qmin) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, qmin) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -74809,10 +74809,10 @@
.n(2)
.k(1)
.qmin(128)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, qmax) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, qmax) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -74822,10 +74822,10 @@
.n(2)
.k(1)
.qmax(128)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, strided_cm) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, strided_cm) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -74835,11 +74835,11 @@
.n(2)
.k(1)
.cm_stride(5)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, k_eq_1) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, k_eq_1) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -74848,10 +74848,10 @@
.m(3)
.n(2)
.k(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, strided_cn) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, strided_cn) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -74861,10 +74861,10 @@
.n(2)
.k(1)
.cn_stride(5)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, k_eq_1_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
@@ -74876,12 +74876,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, k_eq_1_subtile_m) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 3; m++) {
GemmMicrokernelTester()
.mr(3)
@@ -74892,11 +74892,11 @@
.n(2)
.k(1)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, k_eq_1_subtile_n) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
.mr(3)
@@ -74907,11 +74907,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, k_gt_1) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(3)
@@ -74921,11 +74921,11 @@
.m(3)
.n(2)
.k(k)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, k_gt_1_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -74938,13 +74938,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, n_gt_2) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, n_gt_2) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -74955,12 +74955,12 @@
.m(3)
.n(2)
.k(k)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, n_gt_2_strided_cn) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, n_gt_2_strided_cn) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -74972,12 +74972,12 @@
.n(2)
.k(k)
.cn_stride(5)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, n_gt_2_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, n_gt_2_subtile) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
@@ -74990,13 +74990,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, n_div_2) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, n_div_2) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -75007,12 +75007,12 @@
.m(3)
.n(2)
.k(k)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, n_div_2_strided_cn) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, n_div_2_strided_cn) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -75024,12 +75024,12 @@
.n(n)
.k(k)
.cn_stride(5)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, n_div_2_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, n_div_2_subtile) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
@@ -75042,13 +75042,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, small_kernel) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, small_kernel) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(3)
@@ -75059,11 +75059,11 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, small_kernel_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, small_kernel_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -75077,13 +75077,13 @@
.k(k)
.ks(3)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, n_gt_2_small_kernel) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, n_gt_2_small_kernel) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -75095,12 +75095,12 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, n_div_2_small_kernel) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, n_div_2_small_kernel) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -75112,12 +75112,12 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, strided_cm_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -75131,13 +75131,13 @@
.k(k)
.cm_stride(5)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, a_offset) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, a_offset) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(3)
@@ -75149,11 +75149,11 @@
.k(k)
.ks(3)
.a_offset(17)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, zero) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, zero) {
for (uint32_t mz = 0; mz < 3; mz++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -75167,12 +75167,12 @@
.ks(3)
.a_offset(17)
.zero_index(mz)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, qmin) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, qmin) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -75182,10 +75182,10 @@
.n(2)
.k(1)
.qmin(128)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, qmax) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, qmax) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -75195,10 +75195,10 @@
.n(2)
.k(1)
.qmax(128)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, strided_cm) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, strided_cm) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -75208,11 +75208,11 @@
.n(2)
.k(1)
.cm_stride(5)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, k_eq_1) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, k_eq_1) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -75221,10 +75221,10 @@
.m(4)
.n(2)
.k(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, strided_cn) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, strided_cn) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -75234,10 +75234,10 @@
.n(2)
.k(1)
.cn_stride(5)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, k_eq_1_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
@@ -75249,12 +75249,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, k_eq_1_subtile_m) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 4; m++) {
GemmMicrokernelTester()
.mr(4)
@@ -75265,11 +75265,11 @@
.n(2)
.k(1)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, k_eq_1_subtile_n) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
.mr(4)
@@ -75280,11 +75280,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, k_gt_1) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(4)
@@ -75294,11 +75294,11 @@
.m(4)
.n(2)
.k(k)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, k_gt_1_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -75311,13 +75311,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, n_gt_2) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, n_gt_2) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -75328,12 +75328,12 @@
.m(4)
.n(2)
.k(k)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, n_gt_2_strided_cn) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, n_gt_2_strided_cn) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -75345,12 +75345,12 @@
.n(2)
.k(k)
.cn_stride(5)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, n_gt_2_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, n_gt_2_subtile) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
@@ -75363,13 +75363,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, n_div_2) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, n_div_2) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -75380,12 +75380,12 @@
.m(4)
.n(2)
.k(k)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, n_div_2_strided_cn) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, n_div_2_strided_cn) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -75397,12 +75397,12 @@
.n(n)
.k(k)
.cn_stride(5)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, n_div_2_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, n_div_2_subtile) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
@@ -75415,13 +75415,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, small_kernel) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, small_kernel) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(4)
@@ -75432,11 +75432,11 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, small_kernel_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, small_kernel_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -75450,13 +75450,13 @@
.k(k)
.ks(3)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, n_gt_2_small_kernel) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, n_gt_2_small_kernel) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -75468,12 +75468,12 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, n_div_2_small_kernel) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, n_div_2_small_kernel) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -75485,12 +75485,12 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, strided_cm_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -75504,13 +75504,13 @@
.k(k)
.cm_stride(5)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, a_offset) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, a_offset) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(4)
@@ -75522,11 +75522,11 @@
.k(k)
.ks(3)
.a_offset(23)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, zero) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, zero) {
for (uint32_t mz = 0; mz < 4; mz++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -75540,12 +75540,12 @@
.ks(3)
.a_offset(23)
.zero_index(mz)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, qmin) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, qmin) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -75555,10 +75555,10 @@
.n(2)
.k(1)
.qmin(128)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, qmax) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, qmax) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -75568,10 +75568,10 @@
.n(2)
.k(1)
.qmax(128)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, strided_cm) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, strided_cm) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -75581,11 +75581,11 @@
.n(2)
.k(1)
.cm_stride(5)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, k_eq_1) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, k_eq_1) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -75594,10 +75594,10 @@
.m(1)
.n(4)
.k(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, strided_cn) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, strided_cn) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -75607,10 +75607,10 @@
.n(4)
.k(1)
.cn_stride(7)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, k_eq_1_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
@@ -75622,12 +75622,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, k_eq_1_subtile_m) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 1; m++) {
GemmMicrokernelTester()
.mr(1)
@@ -75638,11 +75638,11 @@
.n(4)
.k(1)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, k_eq_1_subtile_n) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
.mr(1)
@@ -75653,11 +75653,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, k_gt_1) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(1)
@@ -75667,11 +75667,11 @@
.m(1)
.n(4)
.k(k)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, k_gt_1_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -75684,13 +75684,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, n_gt_4) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, n_gt_4) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -75701,12 +75701,12 @@
.m(1)
.n(4)
.k(k)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, n_gt_4_strided_cn) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, n_gt_4_strided_cn) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -75718,12 +75718,12 @@
.n(4)
.k(k)
.cn_stride(7)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, n_gt_4_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, n_gt_4_subtile) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
@@ -75736,13 +75736,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, n_div_4) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, n_div_4) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -75753,12 +75753,12 @@
.m(1)
.n(4)
.k(k)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, n_div_4_strided_cn) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, n_div_4_strided_cn) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -75770,12 +75770,12 @@
.n(n)
.k(k)
.cn_stride(7)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, n_div_4_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, n_div_4_subtile) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
@@ -75788,13 +75788,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, small_kernel) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, small_kernel) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(1)
@@ -75805,11 +75805,11 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, small_kernel_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, small_kernel_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -75823,13 +75823,13 @@
.k(k)
.ks(3)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, n_gt_4_small_kernel) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, n_gt_4_small_kernel) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -75841,12 +75841,12 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, n_div_4_small_kernel) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, n_div_4_small_kernel) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -75858,12 +75858,12 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, strided_cm_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -75877,13 +75877,13 @@
.k(k)
.cm_stride(7)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, a_offset) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, a_offset) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(1)
@@ -75895,11 +75895,11 @@
.k(k)
.ks(3)
.a_offset(7)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, zero) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, zero) {
for (uint32_t mz = 0; mz < 1; mz++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -75913,12 +75913,12 @@
.ks(3)
.a_offset(7)
.zero_index(mz)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, qmin) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, qmin) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -75928,10 +75928,10 @@
.n(4)
.k(1)
.qmin(128)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, qmax) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, qmax) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -75941,10 +75941,10 @@
.n(4)
.k(1)
.qmax(128)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, strided_cm) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, strided_cm) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -75954,11 +75954,11 @@
.n(4)
.k(1)
.cm_stride(7)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, k_eq_1) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, k_eq_1) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -75967,10 +75967,10 @@
.m(2)
.n(4)
.k(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, strided_cn) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, strided_cn) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -75980,10 +75980,10 @@
.n(4)
.k(1)
.cn_stride(7)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, k_eq_1_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
@@ -75995,12 +75995,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, k_eq_1_subtile_m) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 2; m++) {
GemmMicrokernelTester()
.mr(2)
@@ -76011,11 +76011,11 @@
.n(4)
.k(1)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, k_eq_1_subtile_n) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
.mr(2)
@@ -76026,11 +76026,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, k_gt_1) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(2)
@@ -76040,11 +76040,11 @@
.m(2)
.n(4)
.k(k)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, k_gt_1_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -76057,13 +76057,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, n_gt_4) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, n_gt_4) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -76074,12 +76074,12 @@
.m(2)
.n(4)
.k(k)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, n_gt_4_strided_cn) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, n_gt_4_strided_cn) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -76091,12 +76091,12 @@
.n(4)
.k(k)
.cn_stride(7)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, n_gt_4_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, n_gt_4_subtile) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
@@ -76109,13 +76109,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, n_div_4) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, n_div_4) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -76126,12 +76126,12 @@
.m(2)
.n(4)
.k(k)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, n_div_4_strided_cn) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, n_div_4_strided_cn) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -76143,12 +76143,12 @@
.n(n)
.k(k)
.cn_stride(7)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, n_div_4_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, n_div_4_subtile) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
@@ -76161,13 +76161,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, small_kernel) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, small_kernel) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(2)
@@ -76178,11 +76178,11 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, small_kernel_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, small_kernel_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -76196,13 +76196,13 @@
.k(k)
.ks(3)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, n_gt_4_small_kernel) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, n_gt_4_small_kernel) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -76214,12 +76214,12 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, n_div_4_small_kernel) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, n_div_4_small_kernel) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -76231,12 +76231,12 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, strided_cm_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -76250,13 +76250,13 @@
.k(k)
.cm_stride(7)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, a_offset) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, a_offset) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(2)
@@ -76268,11 +76268,11 @@
.k(k)
.ks(3)
.a_offset(13)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, zero) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, zero) {
for (uint32_t mz = 0; mz < 2; mz++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -76286,12 +76286,12 @@
.ks(3)
.a_offset(13)
.zero_index(mz)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, qmin) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, qmin) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -76301,10 +76301,10 @@
.n(4)
.k(1)
.qmin(128)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, qmax) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, qmax) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -76314,10 +76314,10 @@
.n(4)
.k(1)
.qmax(128)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, strided_cm) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, strided_cm) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -76327,11 +76327,11 @@
.n(4)
.k(1)
.cm_stride(7)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, k_eq_1) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, k_eq_1) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -76340,10 +76340,10 @@
.m(3)
.n(4)
.k(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, strided_cn) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, strided_cn) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -76353,10 +76353,10 @@
.n(4)
.k(1)
.cn_stride(7)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, k_eq_1_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
@@ -76368,12 +76368,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, k_eq_1_subtile_m) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 3; m++) {
GemmMicrokernelTester()
.mr(3)
@@ -76384,11 +76384,11 @@
.n(4)
.k(1)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, k_eq_1_subtile_n) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
.mr(3)
@@ -76399,11 +76399,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, k_gt_1) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(3)
@@ -76413,11 +76413,11 @@
.m(3)
.n(4)
.k(k)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, k_gt_1_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -76430,13 +76430,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, n_gt_4) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, n_gt_4) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -76447,12 +76447,12 @@
.m(3)
.n(4)
.k(k)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, n_gt_4_strided_cn) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, n_gt_4_strided_cn) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -76464,12 +76464,12 @@
.n(4)
.k(k)
.cn_stride(7)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, n_gt_4_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, n_gt_4_subtile) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
@@ -76482,13 +76482,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, n_div_4) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, n_div_4) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -76499,12 +76499,12 @@
.m(3)
.n(4)
.k(k)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, n_div_4_strided_cn) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, n_div_4_strided_cn) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -76516,12 +76516,12 @@
.n(n)
.k(k)
.cn_stride(7)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, n_div_4_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, n_div_4_subtile) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
@@ -76534,13 +76534,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, small_kernel) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, small_kernel) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(3)
@@ -76551,11 +76551,11 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, small_kernel_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, small_kernel_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -76569,13 +76569,13 @@
.k(k)
.ks(3)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, n_gt_4_small_kernel) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, n_gt_4_small_kernel) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -76587,12 +76587,12 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, n_div_4_small_kernel) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, n_div_4_small_kernel) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -76604,12 +76604,12 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, strided_cm_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -76623,13 +76623,13 @@
.k(k)
.cm_stride(7)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, a_offset) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, a_offset) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(3)
@@ -76641,11 +76641,11 @@
.k(k)
.ks(3)
.a_offset(17)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, zero) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, zero) {
for (uint32_t mz = 0; mz < 3; mz++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -76659,12 +76659,12 @@
.ks(3)
.a_offset(17)
.zero_index(mz)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, qmin) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, qmin) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -76674,10 +76674,10 @@
.n(4)
.k(1)
.qmin(128)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, qmax) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, qmax) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -76687,10 +76687,10 @@
.n(4)
.k(1)
.qmax(128)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, strided_cm) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, strided_cm) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -76700,11 +76700,11 @@
.n(4)
.k(1)
.cm_stride(7)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, k_eq_1) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, k_eq_1) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -76713,10 +76713,10 @@
.m(4)
.n(4)
.k(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, strided_cn) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, strided_cn) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -76726,10 +76726,10 @@
.n(4)
.k(1)
.cn_stride(7)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, k_eq_1_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
@@ -76741,12 +76741,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, k_eq_1_subtile_m) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 4; m++) {
GemmMicrokernelTester()
.mr(4)
@@ -76757,11 +76757,11 @@
.n(4)
.k(1)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, k_eq_1_subtile_n) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
.mr(4)
@@ -76772,11 +76772,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, k_gt_1) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(4)
@@ -76786,11 +76786,11 @@
.m(4)
.n(4)
.k(k)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, k_gt_1_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -76803,13 +76803,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, n_gt_4) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, n_gt_4) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -76820,12 +76820,12 @@
.m(4)
.n(4)
.k(k)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, n_gt_4_strided_cn) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, n_gt_4_strided_cn) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -76837,12 +76837,12 @@
.n(4)
.k(k)
.cn_stride(7)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, n_gt_4_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, n_gt_4_subtile) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
@@ -76855,13 +76855,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, n_div_4) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, n_div_4) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -76872,12 +76872,12 @@
.m(4)
.n(4)
.k(k)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, n_div_4_strided_cn) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, n_div_4_strided_cn) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -76889,12 +76889,12 @@
.n(n)
.k(k)
.cn_stride(7)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, n_div_4_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, n_div_4_subtile) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
@@ -76907,13 +76907,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, small_kernel) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, small_kernel) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(4)
@@ -76924,11 +76924,11 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, small_kernel_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, small_kernel_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -76942,13 +76942,13 @@
.k(k)
.ks(3)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, n_gt_4_small_kernel) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, n_gt_4_small_kernel) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -76960,12 +76960,12 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, n_div_4_small_kernel) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, n_div_4_small_kernel) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -76977,12 +76977,12 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, strided_cm_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -76996,13 +76996,13 @@
.k(k)
.cm_stride(7)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, a_offset) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, a_offset) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(4)
@@ -77014,11 +77014,11 @@
.k(k)
.ks(3)
.a_offset(23)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, zero) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, zero) {
for (uint32_t mz = 0; mz < 4; mz++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -77032,12 +77032,12 @@
.ks(3)
.a_offset(23)
.zero_index(mz)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, qmin) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, qmin) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -77047,10 +77047,10 @@
.n(4)
.k(1)
.qmin(128)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, qmax) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, qmax) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -77060,10 +77060,10 @@
.n(4)
.k(1)
.qmax(128)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, strided_cm) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, strided_cm) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -77073,11 +77073,11 @@
.n(4)
.k(1)
.cm_stride(7)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, k_eq_1) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, k_eq_1) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -77086,10 +77086,10 @@
.m(1)
.n(2)
.k(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, strided_cn) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, strided_cn) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -77099,10 +77099,10 @@
.n(2)
.k(1)
.cn_stride(5)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, k_eq_1_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
@@ -77114,12 +77114,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, k_eq_1_subtile_m) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 1; m++) {
GemmMicrokernelTester()
.mr(1)
@@ -77130,11 +77130,11 @@
.n(2)
.k(1)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, k_eq_1_subtile_n) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
.mr(1)
@@ -77145,11 +77145,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, k_gt_1) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(1)
@@ -77159,11 +77159,11 @@
.m(1)
.n(2)
.k(k)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, k_gt_1_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -77176,13 +77176,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, n_gt_2) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, n_gt_2) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -77193,12 +77193,12 @@
.m(1)
.n(2)
.k(k)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, n_gt_2_strided_cn) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, n_gt_2_strided_cn) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -77210,12 +77210,12 @@
.n(2)
.k(k)
.cn_stride(5)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, n_gt_2_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, n_gt_2_subtile) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
@@ -77228,13 +77228,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, n_div_2) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, n_div_2) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -77245,12 +77245,12 @@
.m(1)
.n(2)
.k(k)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, n_div_2_strided_cn) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, n_div_2_strided_cn) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -77262,12 +77262,12 @@
.n(n)
.k(k)
.cn_stride(5)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, n_div_2_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, n_div_2_subtile) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
@@ -77280,13 +77280,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, small_kernel) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, small_kernel) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(1)
@@ -77297,11 +77297,11 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, small_kernel_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, small_kernel_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -77315,13 +77315,13 @@
.k(k)
.ks(3)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, n_gt_2_small_kernel) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, n_gt_2_small_kernel) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -77333,12 +77333,12 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, n_div_2_small_kernel) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, n_div_2_small_kernel) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -77350,12 +77350,12 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, strided_cm_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -77369,13 +77369,13 @@
.k(k)
.cm_stride(5)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, a_offset) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, a_offset) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(1)
@@ -77387,11 +77387,11 @@
.k(k)
.ks(3)
.a_offset(7)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, zero) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, zero) {
for (uint32_t mz = 0; mz < 1; mz++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -77405,12 +77405,12 @@
.ks(3)
.a_offset(7)
.zero_index(mz)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, qmin) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, qmin) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -77420,10 +77420,10 @@
.n(2)
.k(1)
.qmin(128)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, qmax) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, qmax) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -77433,10 +77433,10 @@
.n(2)
.k(1)
.qmax(128)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, strided_cm) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, strided_cm) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -77446,11 +77446,11 @@
.n(2)
.k(1)
.cm_stride(5)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, k_eq_1) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, k_eq_1) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -77459,10 +77459,10 @@
.m(2)
.n(2)
.k(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, strided_cn) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, strided_cn) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -77472,10 +77472,10 @@
.n(2)
.k(1)
.cn_stride(5)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, k_eq_1_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
@@ -77487,12 +77487,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, k_eq_1_subtile_m) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 2; m++) {
GemmMicrokernelTester()
.mr(2)
@@ -77503,11 +77503,11 @@
.n(2)
.k(1)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, k_eq_1_subtile_n) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
.mr(2)
@@ -77518,11 +77518,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, k_gt_1) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(2)
@@ -77532,11 +77532,11 @@
.m(2)
.n(2)
.k(k)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, k_gt_1_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -77549,13 +77549,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, n_gt_2) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, n_gt_2) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -77566,12 +77566,12 @@
.m(2)
.n(2)
.k(k)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, n_gt_2_strided_cn) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, n_gt_2_strided_cn) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -77583,12 +77583,12 @@
.n(2)
.k(k)
.cn_stride(5)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, n_gt_2_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, n_gt_2_subtile) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
@@ -77601,13 +77601,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, n_div_2) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, n_div_2) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -77618,12 +77618,12 @@
.m(2)
.n(2)
.k(k)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, n_div_2_strided_cn) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, n_div_2_strided_cn) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -77635,12 +77635,12 @@
.n(n)
.k(k)
.cn_stride(5)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, n_div_2_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, n_div_2_subtile) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
@@ -77653,13 +77653,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, small_kernel) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, small_kernel) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(2)
@@ -77670,11 +77670,11 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, small_kernel_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, small_kernel_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -77688,13 +77688,13 @@
.k(k)
.ks(3)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, n_gt_2_small_kernel) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, n_gt_2_small_kernel) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -77706,12 +77706,12 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, n_div_2_small_kernel) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, n_div_2_small_kernel) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -77723,12 +77723,12 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, strided_cm_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -77742,13 +77742,13 @@
.k(k)
.cm_stride(5)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, a_offset) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, a_offset) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(2)
@@ -77760,11 +77760,11 @@
.k(k)
.ks(3)
.a_offset(13)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, zero) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, zero) {
for (uint32_t mz = 0; mz < 2; mz++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -77778,12 +77778,12 @@
.ks(3)
.a_offset(13)
.zero_index(mz)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, qmin) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, qmin) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -77793,10 +77793,10 @@
.n(2)
.k(1)
.qmin(128)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, qmax) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, qmax) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -77806,10 +77806,10 @@
.n(2)
.k(1)
.qmax(128)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, strided_cm) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, strided_cm) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -77819,11 +77819,11 @@
.n(2)
.k(1)
.cm_stride(5)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, k_eq_1) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, k_eq_1) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -77832,10 +77832,10 @@
.m(3)
.n(2)
.k(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, strided_cn) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, strided_cn) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -77845,10 +77845,10 @@
.n(2)
.k(1)
.cn_stride(5)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, k_eq_1_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
@@ -77860,12 +77860,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, k_eq_1_subtile_m) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 3; m++) {
GemmMicrokernelTester()
.mr(3)
@@ -77876,11 +77876,11 @@
.n(2)
.k(1)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, k_eq_1_subtile_n) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
.mr(3)
@@ -77891,11 +77891,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, k_gt_1) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(3)
@@ -77905,11 +77905,11 @@
.m(3)
.n(2)
.k(k)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, k_gt_1_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -77922,13 +77922,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, n_gt_2) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, n_gt_2) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -77939,12 +77939,12 @@
.m(3)
.n(2)
.k(k)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, n_gt_2_strided_cn) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, n_gt_2_strided_cn) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -77956,12 +77956,12 @@
.n(2)
.k(k)
.cn_stride(5)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, n_gt_2_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, n_gt_2_subtile) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
@@ -77974,13 +77974,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, n_div_2) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, n_div_2) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -77991,12 +77991,12 @@
.m(3)
.n(2)
.k(k)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, n_div_2_strided_cn) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, n_div_2_strided_cn) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -78008,12 +78008,12 @@
.n(n)
.k(k)
.cn_stride(5)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, n_div_2_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, n_div_2_subtile) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
@@ -78026,13 +78026,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, small_kernel) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, small_kernel) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(3)
@@ -78043,11 +78043,11 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, small_kernel_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, small_kernel_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -78061,13 +78061,13 @@
.k(k)
.ks(3)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, n_gt_2_small_kernel) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, n_gt_2_small_kernel) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -78079,12 +78079,12 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, n_div_2_small_kernel) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, n_div_2_small_kernel) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -78096,12 +78096,12 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, strided_cm_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -78115,13 +78115,13 @@
.k(k)
.cm_stride(5)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, a_offset) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, a_offset) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(3)
@@ -78133,11 +78133,11 @@
.k(k)
.ks(3)
.a_offset(17)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, zero) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, zero) {
for (uint32_t mz = 0; mz < 3; mz++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -78151,12 +78151,12 @@
.ks(3)
.a_offset(17)
.zero_index(mz)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, qmin) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, qmin) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -78166,10 +78166,10 @@
.n(2)
.k(1)
.qmin(128)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, qmax) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, qmax) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -78179,10 +78179,10 @@
.n(2)
.k(1)
.qmax(128)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, strided_cm) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, strided_cm) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -78192,11 +78192,11 @@
.n(2)
.k(1)
.cm_stride(5)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, k_eq_1) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, k_eq_1) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -78205,10 +78205,10 @@
.m(4)
.n(2)
.k(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, strided_cn) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, strided_cn) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -78218,10 +78218,10 @@
.n(2)
.k(1)
.cn_stride(5)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, k_eq_1_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
@@ -78233,12 +78233,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, k_eq_1_subtile_m) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 4; m++) {
GemmMicrokernelTester()
.mr(4)
@@ -78249,11 +78249,11 @@
.n(2)
.k(1)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, k_eq_1_subtile_n) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
.mr(4)
@@ -78264,11 +78264,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, k_gt_1) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(4)
@@ -78278,11 +78278,11 @@
.m(4)
.n(2)
.k(k)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, k_gt_1_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -78295,13 +78295,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, n_gt_2) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, n_gt_2) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -78312,12 +78312,12 @@
.m(4)
.n(2)
.k(k)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, n_gt_2_strided_cn) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, n_gt_2_strided_cn) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -78329,12 +78329,12 @@
.n(2)
.k(k)
.cn_stride(5)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, n_gt_2_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, n_gt_2_subtile) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
@@ -78347,13 +78347,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, n_div_2) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, n_div_2) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -78364,12 +78364,12 @@
.m(4)
.n(2)
.k(k)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, n_div_2_strided_cn) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, n_div_2_strided_cn) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -78381,12 +78381,12 @@
.n(n)
.k(k)
.cn_stride(5)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, n_div_2_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, n_div_2_subtile) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
@@ -78399,13 +78399,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, small_kernel) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, small_kernel) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(4)
@@ -78416,11 +78416,11 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, small_kernel_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, small_kernel_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -78434,13 +78434,13 @@
.k(k)
.ks(3)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, n_gt_2_small_kernel) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, n_gt_2_small_kernel) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -78452,12 +78452,12 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, n_div_2_small_kernel) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, n_div_2_small_kernel) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -78469,12 +78469,12 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, strided_cm_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -78488,13 +78488,13 @@
.k(k)
.cm_stride(5)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, a_offset) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, a_offset) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(4)
@@ -78506,11 +78506,11 @@
.k(k)
.ks(3)
.a_offset(23)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, zero) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, zero) {
for (uint32_t mz = 0; mz < 4; mz++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -78524,12 +78524,12 @@
.ks(3)
.a_offset(23)
.zero_index(mz)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, qmin) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, qmin) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -78539,10 +78539,10 @@
.n(2)
.k(1)
.qmin(128)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, qmax) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, qmax) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -78552,10 +78552,10 @@
.n(2)
.k(1)
.qmax(128)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, strided_cm) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, strided_cm) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -78565,11 +78565,11 @@
.n(2)
.k(1)
.cm_stride(5)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, k_eq_1) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, k_eq_1) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -78578,10 +78578,10 @@
.m(1)
.n(4)
.k(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, strided_cn) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, strided_cn) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -78591,10 +78591,10 @@
.n(4)
.k(1)
.cn_stride(7)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, k_eq_1_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
@@ -78606,12 +78606,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, k_eq_1_subtile_m) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 1; m++) {
GemmMicrokernelTester()
.mr(1)
@@ -78622,11 +78622,11 @@
.n(4)
.k(1)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, k_eq_1_subtile_n) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
.mr(1)
@@ -78637,11 +78637,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, k_gt_1) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(1)
@@ -78651,11 +78651,11 @@
.m(1)
.n(4)
.k(k)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, k_gt_1_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -78668,13 +78668,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, n_gt_4) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, n_gt_4) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -78685,12 +78685,12 @@
.m(1)
.n(4)
.k(k)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, n_gt_4_strided_cn) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, n_gt_4_strided_cn) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -78702,12 +78702,12 @@
.n(4)
.k(k)
.cn_stride(7)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, n_gt_4_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, n_gt_4_subtile) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
@@ -78720,13 +78720,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, n_div_4) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, n_div_4) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -78737,12 +78737,12 @@
.m(1)
.n(4)
.k(k)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, n_div_4_strided_cn) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, n_div_4_strided_cn) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -78754,12 +78754,12 @@
.n(n)
.k(k)
.cn_stride(7)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, n_div_4_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, n_div_4_subtile) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
@@ -78772,13 +78772,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, small_kernel) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, small_kernel) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(1)
@@ -78789,11 +78789,11 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, small_kernel_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, small_kernel_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -78807,13 +78807,13 @@
.k(k)
.ks(3)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, n_gt_4_small_kernel) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, n_gt_4_small_kernel) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -78825,12 +78825,12 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, n_div_4_small_kernel) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, n_div_4_small_kernel) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -78842,12 +78842,12 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, strided_cm_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -78861,13 +78861,13 @@
.k(k)
.cm_stride(7)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, a_offset) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, a_offset) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(1)
@@ -78879,11 +78879,11 @@
.k(k)
.ks(3)
.a_offset(7)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, zero) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, zero) {
for (uint32_t mz = 0; mz < 1; mz++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -78897,12 +78897,12 @@
.ks(3)
.a_offset(7)
.zero_index(mz)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, qmin) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, qmin) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -78912,10 +78912,10 @@
.n(4)
.k(1)
.qmin(128)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, qmax) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, qmax) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -78925,10 +78925,10 @@
.n(4)
.k(1)
.qmax(128)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, strided_cm) {
+TEST(QS8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, strided_cm) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -78938,11 +78938,11 @@
.n(4)
.k(1)
.cm_stride(7)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, k_eq_1) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, k_eq_1) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -78951,10 +78951,10 @@
.m(2)
.n(4)
.k(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, strided_cn) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, strided_cn) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -78964,10 +78964,10 @@
.n(4)
.k(1)
.cn_stride(7)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, k_eq_1_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
@@ -78979,12 +78979,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, k_eq_1_subtile_m) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 2; m++) {
GemmMicrokernelTester()
.mr(2)
@@ -78995,11 +78995,11 @@
.n(4)
.k(1)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, k_eq_1_subtile_n) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
.mr(2)
@@ -79010,11 +79010,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, k_gt_1) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(2)
@@ -79024,11 +79024,11 @@
.m(2)
.n(4)
.k(k)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, k_gt_1_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -79041,13 +79041,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, n_gt_4) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, n_gt_4) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -79058,12 +79058,12 @@
.m(2)
.n(4)
.k(k)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, n_gt_4_strided_cn) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, n_gt_4_strided_cn) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -79075,12 +79075,12 @@
.n(4)
.k(k)
.cn_stride(7)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, n_gt_4_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, n_gt_4_subtile) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
@@ -79093,13 +79093,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, n_div_4) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, n_div_4) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -79110,12 +79110,12 @@
.m(2)
.n(4)
.k(k)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, n_div_4_strided_cn) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, n_div_4_strided_cn) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -79127,12 +79127,12 @@
.n(n)
.k(k)
.cn_stride(7)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, n_div_4_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, n_div_4_subtile) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
@@ -79145,13 +79145,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, small_kernel) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, small_kernel) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(2)
@@ -79162,11 +79162,11 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, small_kernel_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, small_kernel_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -79180,13 +79180,13 @@
.k(k)
.ks(3)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, n_gt_4_small_kernel) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, n_gt_4_small_kernel) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -79198,12 +79198,12 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, n_div_4_small_kernel) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, n_div_4_small_kernel) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -79215,12 +79215,12 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, strided_cm_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -79234,13 +79234,13 @@
.k(k)
.cm_stride(7)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, a_offset) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, a_offset) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(2)
@@ -79252,11 +79252,11 @@
.k(k)
.ks(3)
.a_offset(13)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, zero) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, zero) {
for (uint32_t mz = 0; mz < 2; mz++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -79270,12 +79270,12 @@
.ks(3)
.a_offset(13)
.zero_index(mz)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, qmin) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, qmin) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -79285,10 +79285,10 @@
.n(4)
.k(1)
.qmin(128)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, qmax) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, qmax) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -79298,10 +79298,10 @@
.n(4)
.k(1)
.qmax(128)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, strided_cm) {
+TEST(QS8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, strided_cm) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -79311,11 +79311,11 @@
.n(4)
.k(1)
.cm_stride(7)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, k_eq_1) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, k_eq_1) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -79324,10 +79324,10 @@
.m(3)
.n(4)
.k(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, strided_cn) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, strided_cn) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -79337,10 +79337,10 @@
.n(4)
.k(1)
.cn_stride(7)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, k_eq_1_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
@@ -79352,12 +79352,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, k_eq_1_subtile_m) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 3; m++) {
GemmMicrokernelTester()
.mr(3)
@@ -79368,11 +79368,11 @@
.n(4)
.k(1)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, k_eq_1_subtile_n) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
.mr(3)
@@ -79383,11 +79383,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, k_gt_1) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(3)
@@ -79397,11 +79397,11 @@
.m(3)
.n(4)
.k(k)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, k_gt_1_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -79414,13 +79414,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, n_gt_4) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, n_gt_4) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -79431,12 +79431,12 @@
.m(3)
.n(4)
.k(k)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, n_gt_4_strided_cn) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, n_gt_4_strided_cn) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -79448,12 +79448,12 @@
.n(4)
.k(k)
.cn_stride(7)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, n_gt_4_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, n_gt_4_subtile) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
@@ -79466,13 +79466,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, n_div_4) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, n_div_4) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -79483,12 +79483,12 @@
.m(3)
.n(4)
.k(k)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, n_div_4_strided_cn) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, n_div_4_strided_cn) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -79500,12 +79500,12 @@
.n(n)
.k(k)
.cn_stride(7)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, n_div_4_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, n_div_4_subtile) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
@@ -79518,13 +79518,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, small_kernel) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, small_kernel) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(3)
@@ -79535,11 +79535,11 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, small_kernel_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, small_kernel_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -79553,13 +79553,13 @@
.k(k)
.ks(3)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, n_gt_4_small_kernel) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, n_gt_4_small_kernel) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -79571,12 +79571,12 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, n_div_4_small_kernel) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, n_div_4_small_kernel) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -79588,12 +79588,12 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, strided_cm_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -79607,13 +79607,13 @@
.k(k)
.cm_stride(7)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, a_offset) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, a_offset) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(3)
@@ -79625,11 +79625,11 @@
.k(k)
.ks(3)
.a_offset(17)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, zero) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, zero) {
for (uint32_t mz = 0; mz < 3; mz++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -79643,12 +79643,12 @@
.ks(3)
.a_offset(17)
.zero_index(mz)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, qmin) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, qmin) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -79658,10 +79658,10 @@
.n(4)
.k(1)
.qmin(128)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, qmax) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, qmax) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -79671,10 +79671,10 @@
.n(4)
.k(1)
.qmax(128)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, strided_cm) {
+TEST(QS8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, strided_cm) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -79684,11 +79684,11 @@
.n(4)
.k(1)
.cm_stride(7)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, k_eq_1) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, k_eq_1) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -79697,10 +79697,10 @@
.m(4)
.n(4)
.k(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, strided_cn) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, strided_cn) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -79710,10 +79710,10 @@
.n(4)
.k(1)
.cn_stride(7)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, k_eq_1_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
@@ -79725,12 +79725,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, k_eq_1_subtile_m) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 4; m++) {
GemmMicrokernelTester()
.mr(4)
@@ -79741,11 +79741,11 @@
.n(4)
.k(1)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, k_eq_1_subtile_n) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
.mr(4)
@@ -79756,11 +79756,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, k_gt_1) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(4)
@@ -79770,11 +79770,11 @@
.m(4)
.n(4)
.k(k)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, k_gt_1_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -79787,13 +79787,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, n_gt_4) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, n_gt_4) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -79804,12 +79804,12 @@
.m(4)
.n(4)
.k(k)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, n_gt_4_strided_cn) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, n_gt_4_strided_cn) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -79821,12 +79821,12 @@
.n(4)
.k(k)
.cn_stride(7)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, n_gt_4_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, n_gt_4_subtile) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
@@ -79839,13 +79839,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, n_div_4) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, n_div_4) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -79856,12 +79856,12 @@
.m(4)
.n(4)
.k(k)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, n_div_4_strided_cn) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, n_div_4_strided_cn) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -79873,12 +79873,12 @@
.n(n)
.k(k)
.cn_stride(7)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, n_div_4_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, n_div_4_subtile) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
@@ -79891,13 +79891,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, small_kernel) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, small_kernel) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(4)
@@ -79908,11 +79908,11 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, small_kernel_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, small_kernel_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -79926,13 +79926,13 @@
.k(k)
.ks(3)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, n_gt_4_small_kernel) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, n_gt_4_small_kernel) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -79944,12 +79944,12 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, n_div_4_small_kernel) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, n_div_4_small_kernel) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -79961,12 +79961,12 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, strided_cm_subtile) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -79980,13 +79980,13 @@
.k(k)
.cm_stride(7)
.iterations(1)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, a_offset) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, a_offset) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(4)
@@ -79998,11 +79998,11 @@
.k(k)
.ks(3)
.a_offset(23)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, zero) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, zero) {
for (uint32_t mz = 0; mz < 4; mz++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -80016,12 +80016,12 @@
.ks(3)
.a_offset(23)
.zero_index(mz)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
}
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, qmin) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, qmin) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -80031,10 +80031,10 @@
.n(4)
.k(1)
.qmin(128)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, qmax) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, qmax) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -80044,10 +80044,10 @@
.n(4)
.k(1)
.qmax(128)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
-TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, strided_cm) {
+TEST(QS8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, strided_cm) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -80057,5 +80057,5 @@
.n(4)
.k(1)
.cm_stride(7)
- .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qs8_conv_minmax_fp32_scalar_magic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ .Test(xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
}
diff --git a/test/qs8-igemm-minmax-fp32.yaml b/test/qs8-igemm-minmax-fp32.yaml
index 5c27d55..d81de8c 100644
--- a/test/qs8-igemm-minmax-fp32.yaml
+++ b/test/qs8-igemm-minmax-fp32.yaml
@@ -499,51 +499,51 @@
- name: xnn_qs8_igemm_minmax_fp32_ukernel_3x4c8__wasmsimd_mul16_ld128
init: xnn_init_qs8_conv_minmax_fp32_wasmsimd_params
k-block: 8
-- name: xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint
- init: xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params
+- name: xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf
+ init: xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params
k-block: 1
-- name: xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint
- init: xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params
+- name: xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf
+ init: xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params
k-block: 1
-- name: xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint
- init: xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params
+- name: xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf
+ init: xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params
k-block: 1
-- name: xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint
- init: xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params
+- name: xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf
+ init: xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params
k-block: 1
-- name: xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint
- init: xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params
+- name: xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf
+ init: xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params
k-block: 1
-- name: xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint
- init: xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params
+- name: xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf
+ init: xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params
k-block: 1
-- name: xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint
- init: xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params
+- name: xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf
+ init: xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params
k-block: 1
-- name: xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint
- init: xnn_init_qs8_conv_minmax_fp32_scalar_lrint_params
+- name: xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf
+ init: xnn_init_qs8_conv_minmax_fp32_scalar_lrintf_params
k-block: 1
-- name: xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_magic
- init: xnn_init_qs8_conv_minmax_fp32_scalar_magic_params
+- name: xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic
+ init: xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params
k-block: 1
-- name: xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_magic
- init: xnn_init_qs8_conv_minmax_fp32_scalar_magic_params
+- name: xnn_qs8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic
+ init: xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params
k-block: 1
-- name: xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_magic
- init: xnn_init_qs8_conv_minmax_fp32_scalar_magic_params
+- name: xnn_qs8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic
+ init: xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params
k-block: 1
-- name: xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_magic
- init: xnn_init_qs8_conv_minmax_fp32_scalar_magic_params
+- name: xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic
+ init: xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params
k-block: 1
-- name: xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_magic
- init: xnn_init_qs8_conv_minmax_fp32_scalar_magic_params
+- name: xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic
+ init: xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params
k-block: 1
-- name: xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_magic
- init: xnn_init_qs8_conv_minmax_fp32_scalar_magic_params
+- name: xnn_qs8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic
+ init: xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params
k-block: 1
-- name: xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_magic
- init: xnn_init_qs8_conv_minmax_fp32_scalar_magic_params
+- name: xnn_qs8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic
+ init: xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params
k-block: 1
-- name: xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_magic
- init: xnn_init_qs8_conv_minmax_fp32_scalar_magic_params
+- name: xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic
+ init: xnn_init_qs8_conv_minmax_fp32_scalar_fmagic_params
k-block: 1
diff --git a/test/qs8-igemm-minmax-rndnu.cc b/test/qs8-igemm-minmax-rndnu.cc
index 8e7d0e3..739368b 100644
--- a/test/qs8-igemm-minmax-rndnu.cc
+++ b/test/qs8-igemm-minmax-rndnu.cc
@@ -105321,2987 +105321,3 @@
.Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16__neon_mull_addw_dup, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
}
#endif // XNN_ARCH_ARM || XNN_ARCH_ARM64
-
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_1X2__SCALAR, k_eq_1) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(1)
- .n(2)
- .k(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_1X2__SCALAR, strided_cn) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(1)
- .n(2)
- .k(1)
- .cn_stride(5)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_1X2__SCALAR, k_eq_1_subtile) {
- for (uint32_t m = 1; m <= 1; m++) {
- for (uint32_t n = 1; n <= 2; n++) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(1)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_1X2__SCALAR, k_eq_1_subtile_m) {
- for (uint32_t m = 1; m <= 1; m++) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(m)
- .n(2)
- .k(1)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_1X2__SCALAR, k_eq_1_subtile_n) {
- for (uint32_t n = 1; n <= 2; n++) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(1)
- .n(n)
- .k(1)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_1X2__SCALAR, k_gt_1) {
- for (size_t k = 2; k < 10; k++) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(1)
- .n(2)
- .k(k)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_1X2__SCALAR, k_gt_1_subtile) {
- for (size_t k = 2; k < 10; k++) {
- for (uint32_t m = 1; m <= 1; m++) {
- for (uint32_t n = 1; n <= 2; n++) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_1X2__SCALAR, n_gt_2) {
- for (uint32_t n = 3; n < 4; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(1)
- .n(2)
- .k(k)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_1X2__SCALAR, n_gt_2_strided_cn) {
- for (uint32_t n = 3; n < 4; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(1)
- .n(2)
- .k(k)
- .cn_stride(5)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_1X2__SCALAR, n_gt_2_subtile) {
- for (uint32_t n = 3; n < 4; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- for (uint32_t m = 1; m <= 1; m++) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_1X2__SCALAR, n_div_2) {
- for (uint32_t n = 4; n <= 6; n += 2) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(1)
- .n(2)
- .k(k)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_1X2__SCALAR, n_div_2_strided_cn) {
- for (uint32_t n = 4; n <= 6; n += 2) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(1)
- .n(n)
- .k(k)
- .cn_stride(5)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_1X2__SCALAR, n_div_2_subtile) {
- for (uint32_t n = 4; n <= 6; n += 2) {
- for (size_t k = 1; k <= 5; k += 2) {
- for (uint32_t m = 1; m <= 1; m++) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_1X2__SCALAR, small_kernel) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(1)
- .n(2)
- .k(k)
- .ks(3)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_1X2__SCALAR, small_kernel_subtile) {
- for (size_t k = 1; k <= 5; k += 2) {
- for (uint32_t m = 1; m <= 1; m++) {
- for (uint32_t n = 1; n <= 2; n++) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .ks(3)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_1X2__SCALAR, n_gt_2_small_kernel) {
- for (uint32_t n = 3; n < 4; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(1)
- .n(2)
- .k(k)
- .ks(3)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_1X2__SCALAR, n_div_2_small_kernel) {
- for (uint32_t n = 4; n <= 6; n += 2) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(1)
- .n(2)
- .k(k)
- .ks(3)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_1X2__SCALAR, strided_cm_subtile) {
- for (size_t k = 1; k <= 5; k += 2) {
- for (uint32_t m = 1; m <= 1; m++) {
- for (uint32_t n = 1; n <= 2; n++) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .cm_stride(5)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_1X2__SCALAR, a_offset) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(1)
- .n(2)
- .k(k)
- .ks(3)
- .a_offset(7)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_1X2__SCALAR, zero) {
- for (uint32_t mz = 0; mz < 1; mz++) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(1)
- .n(2)
- .k(k)
- .ks(3)
- .a_offset(7)
- .zero_index(mz)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_1X2__SCALAR, qmin) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(1)
- .n(2)
- .k(1)
- .qmin(128)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_1X2__SCALAR, qmax) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(1)
- .n(2)
- .k(1)
- .qmax(128)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_1X2__SCALAR, strided_cm) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(1)
- .n(2)
- .k(1)
- .cm_stride(5)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_2X2__SCALAR, k_eq_1) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(2)
- .n(2)
- .k(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_2X2__SCALAR, strided_cn) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(2)
- .n(2)
- .k(1)
- .cn_stride(5)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_2X2__SCALAR, k_eq_1_subtile) {
- for (uint32_t m = 1; m <= 2; m++) {
- for (uint32_t n = 1; n <= 2; n++) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(1)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_2X2__SCALAR, k_eq_1_subtile_m) {
- for (uint32_t m = 1; m <= 2; m++) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(m)
- .n(2)
- .k(1)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_2X2__SCALAR, k_eq_1_subtile_n) {
- for (uint32_t n = 1; n <= 2; n++) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(2)
- .n(n)
- .k(1)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_2X2__SCALAR, k_gt_1) {
- for (size_t k = 2; k < 10; k++) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(2)
- .n(2)
- .k(k)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_2X2__SCALAR, k_gt_1_subtile) {
- for (size_t k = 2; k < 10; k++) {
- for (uint32_t m = 1; m <= 2; m++) {
- for (uint32_t n = 1; n <= 2; n++) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_2X2__SCALAR, n_gt_2) {
- for (uint32_t n = 3; n < 4; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(2)
- .n(2)
- .k(k)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_2X2__SCALAR, n_gt_2_strided_cn) {
- for (uint32_t n = 3; n < 4; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(2)
- .n(2)
- .k(k)
- .cn_stride(5)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_2X2__SCALAR, n_gt_2_subtile) {
- for (uint32_t n = 3; n < 4; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- for (uint32_t m = 1; m <= 2; m++) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_2X2__SCALAR, n_div_2) {
- for (uint32_t n = 4; n <= 6; n += 2) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(2)
- .n(2)
- .k(k)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_2X2__SCALAR, n_div_2_strided_cn) {
- for (uint32_t n = 4; n <= 6; n += 2) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(2)
- .n(n)
- .k(k)
- .cn_stride(5)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_2X2__SCALAR, n_div_2_subtile) {
- for (uint32_t n = 4; n <= 6; n += 2) {
- for (size_t k = 1; k <= 5; k += 2) {
- for (uint32_t m = 1; m <= 2; m++) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_2X2__SCALAR, small_kernel) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(2)
- .n(2)
- .k(k)
- .ks(3)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_2X2__SCALAR, small_kernel_subtile) {
- for (size_t k = 1; k <= 5; k += 2) {
- for (uint32_t m = 1; m <= 2; m++) {
- for (uint32_t n = 1; n <= 2; n++) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .ks(3)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_2X2__SCALAR, n_gt_2_small_kernel) {
- for (uint32_t n = 3; n < 4; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(2)
- .n(2)
- .k(k)
- .ks(3)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_2X2__SCALAR, n_div_2_small_kernel) {
- for (uint32_t n = 4; n <= 6; n += 2) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(2)
- .n(2)
- .k(k)
- .ks(3)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_2X2__SCALAR, strided_cm_subtile) {
- for (size_t k = 1; k <= 5; k += 2) {
- for (uint32_t m = 1; m <= 2; m++) {
- for (uint32_t n = 1; n <= 2; n++) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .cm_stride(5)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_2X2__SCALAR, a_offset) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(2)
- .n(2)
- .k(k)
- .ks(3)
- .a_offset(13)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_2X2__SCALAR, zero) {
- for (uint32_t mz = 0; mz < 2; mz++) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(2)
- .n(2)
- .k(k)
- .ks(3)
- .a_offset(13)
- .zero_index(mz)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_2X2__SCALAR, qmin) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(2)
- .n(2)
- .k(1)
- .qmin(128)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_2X2__SCALAR, qmax) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(2)
- .n(2)
- .k(1)
- .qmax(128)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_2X2__SCALAR, strided_cm) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(2)
- .n(2)
- .k(1)
- .cm_stride(5)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_3X2__SCALAR, k_eq_1) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(3)
- .n(2)
- .k(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_3X2__SCALAR, strided_cn) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(3)
- .n(2)
- .k(1)
- .cn_stride(5)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_3X2__SCALAR, k_eq_1_subtile) {
- for (uint32_t m = 1; m <= 3; m++) {
- for (uint32_t n = 1; n <= 2; n++) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(1)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_3X2__SCALAR, k_eq_1_subtile_m) {
- for (uint32_t m = 1; m <= 3; m++) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(m)
- .n(2)
- .k(1)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_3X2__SCALAR, k_eq_1_subtile_n) {
- for (uint32_t n = 1; n <= 2; n++) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(3)
- .n(n)
- .k(1)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_3X2__SCALAR, k_gt_1) {
- for (size_t k = 2; k < 10; k++) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(3)
- .n(2)
- .k(k)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_3X2__SCALAR, k_gt_1_subtile) {
- for (size_t k = 2; k < 10; k++) {
- for (uint32_t m = 1; m <= 3; m++) {
- for (uint32_t n = 1; n <= 2; n++) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_3X2__SCALAR, n_gt_2) {
- for (uint32_t n = 3; n < 4; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(3)
- .n(2)
- .k(k)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_3X2__SCALAR, n_gt_2_strided_cn) {
- for (uint32_t n = 3; n < 4; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(3)
- .n(2)
- .k(k)
- .cn_stride(5)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_3X2__SCALAR, n_gt_2_subtile) {
- for (uint32_t n = 3; n < 4; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- for (uint32_t m = 1; m <= 3; m++) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_3X2__SCALAR, n_div_2) {
- for (uint32_t n = 4; n <= 6; n += 2) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(3)
- .n(2)
- .k(k)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_3X2__SCALAR, n_div_2_strided_cn) {
- for (uint32_t n = 4; n <= 6; n += 2) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(3)
- .n(n)
- .k(k)
- .cn_stride(5)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_3X2__SCALAR, n_div_2_subtile) {
- for (uint32_t n = 4; n <= 6; n += 2) {
- for (size_t k = 1; k <= 5; k += 2) {
- for (uint32_t m = 1; m <= 3; m++) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_3X2__SCALAR, small_kernel) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(3)
- .n(2)
- .k(k)
- .ks(3)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_3X2__SCALAR, small_kernel_subtile) {
- for (size_t k = 1; k <= 5; k += 2) {
- for (uint32_t m = 1; m <= 3; m++) {
- for (uint32_t n = 1; n <= 2; n++) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .ks(3)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_3X2__SCALAR, n_gt_2_small_kernel) {
- for (uint32_t n = 3; n < 4; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(3)
- .n(2)
- .k(k)
- .ks(3)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_3X2__SCALAR, n_div_2_small_kernel) {
- for (uint32_t n = 4; n <= 6; n += 2) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(3)
- .n(2)
- .k(k)
- .ks(3)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_3X2__SCALAR, strided_cm_subtile) {
- for (size_t k = 1; k <= 5; k += 2) {
- for (uint32_t m = 1; m <= 3; m++) {
- for (uint32_t n = 1; n <= 2; n++) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .cm_stride(5)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_3X2__SCALAR, a_offset) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(3)
- .n(2)
- .k(k)
- .ks(3)
- .a_offset(17)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_3X2__SCALAR, zero) {
- for (uint32_t mz = 0; mz < 3; mz++) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(3)
- .n(2)
- .k(k)
- .ks(3)
- .a_offset(17)
- .zero_index(mz)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_3X2__SCALAR, qmin) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(3)
- .n(2)
- .k(1)
- .qmin(128)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_3X2__SCALAR, qmax) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(3)
- .n(2)
- .k(1)
- .qmax(128)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_3X2__SCALAR, strided_cm) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(3)
- .n(2)
- .k(1)
- .cm_stride(5)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_4X2__SCALAR, k_eq_1) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(4)
- .n(2)
- .k(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_4X2__SCALAR, strided_cn) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(4)
- .n(2)
- .k(1)
- .cn_stride(5)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_4X2__SCALAR, k_eq_1_subtile) {
- for (uint32_t m = 1; m <= 4; m++) {
- for (uint32_t n = 1; n <= 2; n++) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(1)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_4X2__SCALAR, k_eq_1_subtile_m) {
- for (uint32_t m = 1; m <= 4; m++) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(m)
- .n(2)
- .k(1)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_4X2__SCALAR, k_eq_1_subtile_n) {
- for (uint32_t n = 1; n <= 2; n++) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(4)
- .n(n)
- .k(1)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_4X2__SCALAR, k_gt_1) {
- for (size_t k = 2; k < 10; k++) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(4)
- .n(2)
- .k(k)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_4X2__SCALAR, k_gt_1_subtile) {
- for (size_t k = 2; k < 10; k++) {
- for (uint32_t m = 1; m <= 4; m++) {
- for (uint32_t n = 1; n <= 2; n++) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_4X2__SCALAR, n_gt_2) {
- for (uint32_t n = 3; n < 4; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(4)
- .n(2)
- .k(k)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_4X2__SCALAR, n_gt_2_strided_cn) {
- for (uint32_t n = 3; n < 4; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(4)
- .n(2)
- .k(k)
- .cn_stride(5)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_4X2__SCALAR, n_gt_2_subtile) {
- for (uint32_t n = 3; n < 4; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- for (uint32_t m = 1; m <= 4; m++) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_4X2__SCALAR, n_div_2) {
- for (uint32_t n = 4; n <= 6; n += 2) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(4)
- .n(2)
- .k(k)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_4X2__SCALAR, n_div_2_strided_cn) {
- for (uint32_t n = 4; n <= 6; n += 2) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(4)
- .n(n)
- .k(k)
- .cn_stride(5)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_4X2__SCALAR, n_div_2_subtile) {
- for (uint32_t n = 4; n <= 6; n += 2) {
- for (size_t k = 1; k <= 5; k += 2) {
- for (uint32_t m = 1; m <= 4; m++) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_4X2__SCALAR, small_kernel) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(4)
- .n(2)
- .k(k)
- .ks(3)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_4X2__SCALAR, small_kernel_subtile) {
- for (size_t k = 1; k <= 5; k += 2) {
- for (uint32_t m = 1; m <= 4; m++) {
- for (uint32_t n = 1; n <= 2; n++) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .ks(3)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_4X2__SCALAR, n_gt_2_small_kernel) {
- for (uint32_t n = 3; n < 4; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(4)
- .n(2)
- .k(k)
- .ks(3)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_4X2__SCALAR, n_div_2_small_kernel) {
- for (uint32_t n = 4; n <= 6; n += 2) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(4)
- .n(2)
- .k(k)
- .ks(3)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_4X2__SCALAR, strided_cm_subtile) {
- for (size_t k = 1; k <= 5; k += 2) {
- for (uint32_t m = 1; m <= 4; m++) {
- for (uint32_t n = 1; n <= 2; n++) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .cm_stride(5)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_4X2__SCALAR, a_offset) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(4)
- .n(2)
- .k(k)
- .ks(3)
- .a_offset(23)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_4X2__SCALAR, zero) {
- for (uint32_t mz = 0; mz < 4; mz++) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(4)
- .n(2)
- .k(k)
- .ks(3)
- .a_offset(23)
- .zero_index(mz)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_4X2__SCALAR, qmin) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(4)
- .n(2)
- .k(1)
- .qmin(128)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_4X2__SCALAR, qmax) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(4)
- .n(2)
- .k(1)
- .qmax(128)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_4X2__SCALAR, strided_cm) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(2)
- .kr(1)
- .sr(1)
- .m(4)
- .n(2)
- .k(1)
- .cm_stride(5)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x2__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_1X4__SCALAR, k_eq_1) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(1)
- .n(4)
- .k(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_1X4__SCALAR, strided_cn) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(1)
- .n(4)
- .k(1)
- .cn_stride(7)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_1X4__SCALAR, k_eq_1_subtile) {
- for (uint32_t m = 1; m <= 1; m++) {
- for (uint32_t n = 1; n <= 4; n++) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(1)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_1X4__SCALAR, k_eq_1_subtile_m) {
- for (uint32_t m = 1; m <= 1; m++) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(m)
- .n(4)
- .k(1)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_1X4__SCALAR, k_eq_1_subtile_n) {
- for (uint32_t n = 1; n <= 4; n++) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(1)
- .n(n)
- .k(1)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_1X4__SCALAR, k_gt_1) {
- for (size_t k = 2; k < 10; k++) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(1)
- .n(4)
- .k(k)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_1X4__SCALAR, k_gt_1_subtile) {
- for (size_t k = 2; k < 10; k++) {
- for (uint32_t m = 1; m <= 1; m++) {
- for (uint32_t n = 1; n <= 4; n++) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_1X4__SCALAR, n_gt_4) {
- for (uint32_t n = 5; n < 8; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(1)
- .n(4)
- .k(k)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_1X4__SCALAR, n_gt_4_strided_cn) {
- for (uint32_t n = 5; n < 8; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(1)
- .n(4)
- .k(k)
- .cn_stride(7)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_1X4__SCALAR, n_gt_4_subtile) {
- for (uint32_t n = 5; n < 8; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- for (uint32_t m = 1; m <= 1; m++) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_1X4__SCALAR, n_div_4) {
- for (uint32_t n = 8; n <= 12; n += 4) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(1)
- .n(4)
- .k(k)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_1X4__SCALAR, n_div_4_strided_cn) {
- for (uint32_t n = 8; n <= 12; n += 4) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(1)
- .n(n)
- .k(k)
- .cn_stride(7)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_1X4__SCALAR, n_div_4_subtile) {
- for (uint32_t n = 8; n <= 12; n += 4) {
- for (size_t k = 1; k <= 5; k += 2) {
- for (uint32_t m = 1; m <= 1; m++) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_1X4__SCALAR, small_kernel) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(1)
- .n(4)
- .k(k)
- .ks(3)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_1X4__SCALAR, small_kernel_subtile) {
- for (size_t k = 1; k <= 5; k += 2) {
- for (uint32_t m = 1; m <= 1; m++) {
- for (uint32_t n = 1; n <= 4; n++) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .ks(3)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_1X4__SCALAR, n_gt_4_small_kernel) {
- for (uint32_t n = 5; n < 8; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(1)
- .n(4)
- .k(k)
- .ks(3)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_1X4__SCALAR, n_div_4_small_kernel) {
- for (uint32_t n = 8; n <= 12; n += 4) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(1)
- .n(4)
- .k(k)
- .ks(3)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_1X4__SCALAR, strided_cm_subtile) {
- for (size_t k = 1; k <= 5; k += 2) {
- for (uint32_t m = 1; m <= 1; m++) {
- for (uint32_t n = 1; n <= 4; n++) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .cm_stride(7)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_1X4__SCALAR, a_offset) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(1)
- .n(4)
- .k(k)
- .ks(3)
- .a_offset(7)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_1X4__SCALAR, zero) {
- for (uint32_t mz = 0; mz < 1; mz++) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(1)
- .n(4)
- .k(k)
- .ks(3)
- .a_offset(7)
- .zero_index(mz)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_1X4__SCALAR, qmin) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(1)
- .n(4)
- .k(1)
- .qmin(128)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_1X4__SCALAR, qmax) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(1)
- .n(4)
- .k(1)
- .qmax(128)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_1X4__SCALAR, strided_cm) {
- GemmMicrokernelTester()
- .mr(1)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(1)
- .n(4)
- .k(1)
- .cm_stride(7)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_2X4__SCALAR, k_eq_1) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(2)
- .n(4)
- .k(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_2X4__SCALAR, strided_cn) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(2)
- .n(4)
- .k(1)
- .cn_stride(7)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_2X4__SCALAR, k_eq_1_subtile) {
- for (uint32_t m = 1; m <= 2; m++) {
- for (uint32_t n = 1; n <= 4; n++) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(1)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_2X4__SCALAR, k_eq_1_subtile_m) {
- for (uint32_t m = 1; m <= 2; m++) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(m)
- .n(4)
- .k(1)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_2X4__SCALAR, k_eq_1_subtile_n) {
- for (uint32_t n = 1; n <= 4; n++) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(2)
- .n(n)
- .k(1)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_2X4__SCALAR, k_gt_1) {
- for (size_t k = 2; k < 10; k++) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(2)
- .n(4)
- .k(k)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_2X4__SCALAR, k_gt_1_subtile) {
- for (size_t k = 2; k < 10; k++) {
- for (uint32_t m = 1; m <= 2; m++) {
- for (uint32_t n = 1; n <= 4; n++) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_2X4__SCALAR, n_gt_4) {
- for (uint32_t n = 5; n < 8; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(2)
- .n(4)
- .k(k)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_2X4__SCALAR, n_gt_4_strided_cn) {
- for (uint32_t n = 5; n < 8; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(2)
- .n(4)
- .k(k)
- .cn_stride(7)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_2X4__SCALAR, n_gt_4_subtile) {
- for (uint32_t n = 5; n < 8; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- for (uint32_t m = 1; m <= 2; m++) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_2X4__SCALAR, n_div_4) {
- for (uint32_t n = 8; n <= 12; n += 4) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(2)
- .n(4)
- .k(k)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_2X4__SCALAR, n_div_4_strided_cn) {
- for (uint32_t n = 8; n <= 12; n += 4) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(2)
- .n(n)
- .k(k)
- .cn_stride(7)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_2X4__SCALAR, n_div_4_subtile) {
- for (uint32_t n = 8; n <= 12; n += 4) {
- for (size_t k = 1; k <= 5; k += 2) {
- for (uint32_t m = 1; m <= 2; m++) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_2X4__SCALAR, small_kernel) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(2)
- .n(4)
- .k(k)
- .ks(3)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_2X4__SCALAR, small_kernel_subtile) {
- for (size_t k = 1; k <= 5; k += 2) {
- for (uint32_t m = 1; m <= 2; m++) {
- for (uint32_t n = 1; n <= 4; n++) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .ks(3)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_2X4__SCALAR, n_gt_4_small_kernel) {
- for (uint32_t n = 5; n < 8; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(2)
- .n(4)
- .k(k)
- .ks(3)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_2X4__SCALAR, n_div_4_small_kernel) {
- for (uint32_t n = 8; n <= 12; n += 4) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(2)
- .n(4)
- .k(k)
- .ks(3)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_2X4__SCALAR, strided_cm_subtile) {
- for (size_t k = 1; k <= 5; k += 2) {
- for (uint32_t m = 1; m <= 2; m++) {
- for (uint32_t n = 1; n <= 4; n++) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .cm_stride(7)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_2X4__SCALAR, a_offset) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(2)
- .n(4)
- .k(k)
- .ks(3)
- .a_offset(13)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_2X4__SCALAR, zero) {
- for (uint32_t mz = 0; mz < 2; mz++) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(2)
- .n(4)
- .k(k)
- .ks(3)
- .a_offset(13)
- .zero_index(mz)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_2X4__SCALAR, qmin) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(2)
- .n(4)
- .k(1)
- .qmin(128)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_2X4__SCALAR, qmax) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(2)
- .n(4)
- .k(1)
- .qmax(128)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_2X4__SCALAR, strided_cm) {
- GemmMicrokernelTester()
- .mr(2)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(2)
- .n(4)
- .k(1)
- .cm_stride(7)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_3X4__SCALAR, k_eq_1) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(3)
- .n(4)
- .k(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_3X4__SCALAR, strided_cn) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(3)
- .n(4)
- .k(1)
- .cn_stride(7)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_3X4__SCALAR, k_eq_1_subtile) {
- for (uint32_t m = 1; m <= 3; m++) {
- for (uint32_t n = 1; n <= 4; n++) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(1)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_3X4__SCALAR, k_eq_1_subtile_m) {
- for (uint32_t m = 1; m <= 3; m++) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(m)
- .n(4)
- .k(1)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_3X4__SCALAR, k_eq_1_subtile_n) {
- for (uint32_t n = 1; n <= 4; n++) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(3)
- .n(n)
- .k(1)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_3X4__SCALAR, k_gt_1) {
- for (size_t k = 2; k < 10; k++) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(3)
- .n(4)
- .k(k)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_3X4__SCALAR, k_gt_1_subtile) {
- for (size_t k = 2; k < 10; k++) {
- for (uint32_t m = 1; m <= 3; m++) {
- for (uint32_t n = 1; n <= 4; n++) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_3X4__SCALAR, n_gt_4) {
- for (uint32_t n = 5; n < 8; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(3)
- .n(4)
- .k(k)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_3X4__SCALAR, n_gt_4_strided_cn) {
- for (uint32_t n = 5; n < 8; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(3)
- .n(4)
- .k(k)
- .cn_stride(7)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_3X4__SCALAR, n_gt_4_subtile) {
- for (uint32_t n = 5; n < 8; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- for (uint32_t m = 1; m <= 3; m++) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_3X4__SCALAR, n_div_4) {
- for (uint32_t n = 8; n <= 12; n += 4) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(3)
- .n(4)
- .k(k)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_3X4__SCALAR, n_div_4_strided_cn) {
- for (uint32_t n = 8; n <= 12; n += 4) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(3)
- .n(n)
- .k(k)
- .cn_stride(7)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_3X4__SCALAR, n_div_4_subtile) {
- for (uint32_t n = 8; n <= 12; n += 4) {
- for (size_t k = 1; k <= 5; k += 2) {
- for (uint32_t m = 1; m <= 3; m++) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_3X4__SCALAR, small_kernel) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(3)
- .n(4)
- .k(k)
- .ks(3)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_3X4__SCALAR, small_kernel_subtile) {
- for (size_t k = 1; k <= 5; k += 2) {
- for (uint32_t m = 1; m <= 3; m++) {
- for (uint32_t n = 1; n <= 4; n++) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .ks(3)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_3X4__SCALAR, n_gt_4_small_kernel) {
- for (uint32_t n = 5; n < 8; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(3)
- .n(4)
- .k(k)
- .ks(3)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_3X4__SCALAR, n_div_4_small_kernel) {
- for (uint32_t n = 8; n <= 12; n += 4) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(3)
- .n(4)
- .k(k)
- .ks(3)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_3X4__SCALAR, strided_cm_subtile) {
- for (size_t k = 1; k <= 5; k += 2) {
- for (uint32_t m = 1; m <= 3; m++) {
- for (uint32_t n = 1; n <= 4; n++) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .cm_stride(7)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_3X4__SCALAR, a_offset) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(3)
- .n(4)
- .k(k)
- .ks(3)
- .a_offset(17)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_3X4__SCALAR, zero) {
- for (uint32_t mz = 0; mz < 3; mz++) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(3)
- .n(4)
- .k(k)
- .ks(3)
- .a_offset(17)
- .zero_index(mz)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_3X4__SCALAR, qmin) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(3)
- .n(4)
- .k(1)
- .qmin(128)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_3X4__SCALAR, qmax) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(3)
- .n(4)
- .k(1)
- .qmax(128)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_3X4__SCALAR, strided_cm) {
- GemmMicrokernelTester()
- .mr(3)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(3)
- .n(4)
- .k(1)
- .cm_stride(7)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_4X4__SCALAR, k_eq_1) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(4)
- .n(4)
- .k(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_4X4__SCALAR, strided_cn) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(4)
- .n(4)
- .k(1)
- .cn_stride(7)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_4X4__SCALAR, k_eq_1_subtile) {
- for (uint32_t m = 1; m <= 4; m++) {
- for (uint32_t n = 1; n <= 4; n++) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(1)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_4X4__SCALAR, k_eq_1_subtile_m) {
- for (uint32_t m = 1; m <= 4; m++) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(m)
- .n(4)
- .k(1)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_4X4__SCALAR, k_eq_1_subtile_n) {
- for (uint32_t n = 1; n <= 4; n++) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(4)
- .n(n)
- .k(1)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_4X4__SCALAR, k_gt_1) {
- for (size_t k = 2; k < 10; k++) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(4)
- .n(4)
- .k(k)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_4X4__SCALAR, k_gt_1_subtile) {
- for (size_t k = 2; k < 10; k++) {
- for (uint32_t m = 1; m <= 4; m++) {
- for (uint32_t n = 1; n <= 4; n++) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_4X4__SCALAR, n_gt_4) {
- for (uint32_t n = 5; n < 8; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(4)
- .n(4)
- .k(k)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_4X4__SCALAR, n_gt_4_strided_cn) {
- for (uint32_t n = 5; n < 8; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(4)
- .n(4)
- .k(k)
- .cn_stride(7)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_4X4__SCALAR, n_gt_4_subtile) {
- for (uint32_t n = 5; n < 8; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- for (uint32_t m = 1; m <= 4; m++) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_4X4__SCALAR, n_div_4) {
- for (uint32_t n = 8; n <= 12; n += 4) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(4)
- .n(4)
- .k(k)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_4X4__SCALAR, n_div_4_strided_cn) {
- for (uint32_t n = 8; n <= 12; n += 4) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(4)
- .n(n)
- .k(k)
- .cn_stride(7)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_4X4__SCALAR, n_div_4_subtile) {
- for (uint32_t n = 8; n <= 12; n += 4) {
- for (size_t k = 1; k <= 5; k += 2) {
- for (uint32_t m = 1; m <= 4; m++) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_4X4__SCALAR, small_kernel) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(4)
- .n(4)
- .k(k)
- .ks(3)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_4X4__SCALAR, small_kernel_subtile) {
- for (size_t k = 1; k <= 5; k += 2) {
- for (uint32_t m = 1; m <= 4; m++) {
- for (uint32_t n = 1; n <= 4; n++) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .ks(3)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_4X4__SCALAR, n_gt_4_small_kernel) {
- for (uint32_t n = 5; n < 8; n++) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(4)
- .n(4)
- .k(k)
- .ks(3)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_4X4__SCALAR, n_div_4_small_kernel) {
- for (uint32_t n = 8; n <= 12; n += 4) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(4)
- .n(4)
- .k(k)
- .ks(3)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_4X4__SCALAR, strided_cm_subtile) {
- for (size_t k = 1; k <= 5; k += 2) {
- for (uint32_t m = 1; m <= 4; m++) {
- for (uint32_t n = 1; n <= 4; n++) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(k)
- .cm_stride(7)
- .iterations(1)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_4X4__SCALAR, a_offset) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(4)
- .n(4)
- .k(k)
- .ks(3)
- .a_offset(23)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_4X4__SCALAR, zero) {
- for (uint32_t mz = 0; mz < 4; mz++) {
- for (size_t k = 1; k <= 5; k += 2) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(4)
- .n(4)
- .k(k)
- .ks(3)
- .a_offset(23)
- .zero_index(mz)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
- }
- }
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_4X4__SCALAR, qmin) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(4)
- .n(4)
- .k(1)
- .qmin(128)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_4X4__SCALAR, qmax) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(4)
- .n(4)
- .k(1)
- .qmax(128)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
-
-TEST(QS8_IGEMM_MINMAX_RNDNU_4X4__SCALAR, strided_cm) {
- GemmMicrokernelTester()
- .mr(4)
- .nr(4)
- .kr(1)
- .sr(1)
- .m(4)
- .n(4)
- .k(1)
- .cm_stride(7)
- .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x4__scalar, xnn_init_qs8_conv_minmax_rndnu_scalar_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
-}
diff --git a/test/qs8-igemm-minmax-rndnu.yaml b/test/qs8-igemm-minmax-rndnu.yaml
index 0643ec7..7c42cd3 100644
--- a/test/qs8-igemm-minmax-rndnu.yaml
+++ b/test/qs8-igemm-minmax-rndnu.yaml
@@ -699,27 +699,3 @@
- name: xnn_qs8_igemm_minmax_rndnu_ukernel_4x16__neon_mull_addw_dup
init: xnn_init_qs8_conv_minmax_rndnu_neon_params
k-block: 8
-- name: xnn_qs8_igemm_minmax_rndnu_ukernel_1x2__scalar
- init: xnn_init_qs8_conv_minmax_rndnu_scalar_params
- k-block: 1
-- name: xnn_qs8_igemm_minmax_rndnu_ukernel_2x2__scalar
- init: xnn_init_qs8_conv_minmax_rndnu_scalar_params
- k-block: 1
-- name: xnn_qs8_igemm_minmax_rndnu_ukernel_3x2__scalar
- init: xnn_init_qs8_conv_minmax_rndnu_scalar_params
- k-block: 1
-- name: xnn_qs8_igemm_minmax_rndnu_ukernel_4x2__scalar
- init: xnn_init_qs8_conv_minmax_rndnu_scalar_params
- k-block: 1
-- name: xnn_qs8_igemm_minmax_rndnu_ukernel_1x4__scalar
- init: xnn_init_qs8_conv_minmax_rndnu_scalar_params
- k-block: 1
-- name: xnn_qs8_igemm_minmax_rndnu_ukernel_2x4__scalar
- init: xnn_init_qs8_conv_minmax_rndnu_scalar_params
- k-block: 1
-- name: xnn_qs8_igemm_minmax_rndnu_ukernel_3x4__scalar
- init: xnn_init_qs8_conv_minmax_rndnu_scalar_params
- k-block: 1
-- name: xnn_qs8_igemm_minmax_rndnu_ukernel_4x4__scalar
- init: xnn_init_qs8_conv_minmax_rndnu_scalar_params
- k-block: 1
diff --git a/test/qs8-requantization.cc b/test/qs8-requantization.cc
index 98906cd..97dd14e 100644
--- a/test/qs8-requantization.cc
+++ b/test/qs8-requantization.cc
@@ -404,12 +404,12 @@
* FP32-based scalar implementation using magic trick for FP32->INT32 conversion.
*/
-TEST(QS8_FP32__SCALAR_MAGIC, random_cases) {
+TEST(QS8_FP32__SCALAR_FMAGIC, random_cases) {
RequantizationTester()
.qmin(std::numeric_limits<int8_t>::min())
.qmax(std::numeric_limits<int8_t>::max())
.iterations(1000)
- .TestRandomCasesApproximate(xnn_qs8_requantize_fp32__scalar_magic);
+ .TestRandomCasesApproximate(xnn_qs8_requantize_fp32__scalar_fmagic);
}
diff --git a/test/qu8-dwconv-minmax-fp32.cc b/test/qu8-dwconv-minmax-fp32.cc
index 80a30da..71b8ab4 100644
--- a/test/qu8-dwconv-minmax-fp32.cc
+++ b/test/qu8-dwconv-minmax-fp32.cc
@@ -5961,58 +5961,58 @@
#endif // XNN_ARCH_WASMSIMD || XNN_ARCH_WASMRELAXEDSIMD
-TEST(QU8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINT, c_eq_1) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINTF, c_eq_1) {
DWConvMicrokernelTester()
.cr(1)
.kr(9)
.channels(1)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINT, c_gt_1) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINTF, c_gt_1) {
for (uint32_t channels = 2; channels < 10; channels++) {
DWConvMicrokernelTester()
.cr(1)
.kr(9)
.channels(channels)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINT, c_gt_1_with_qmin) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINTF, c_gt_1_with_qmin) {
for (uint32_t channels = 2; channels < 10; channels++) {
DWConvMicrokernelTester()
.cr(1)
.kr(9)
.channels(channels)
.qmin(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINT, c_gt_1_with_qmax) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINTF, c_gt_1_with_qmax) {
for (uint32_t channels = 2; channels < 10; channels++) {
DWConvMicrokernelTester()
.cr(1)
.kr(9)
.channels(channels)
.qmax(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINT, multipixel) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINTF, multipixel) {
for (size_t channels = 1; channels <= 5; channels += 1) {
DWConvMicrokernelTester()
.cr(1)
.kr(9)
.channels(channels)
.width(3)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINT, multipixel_with_step) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINTF, multipixel_with_step) {
for (size_t channels = 1; channels <= 5; channels += 1) {
for (size_t step = 2; step <= 9; step++) {
DWConvMicrokernelTester()
@@ -6021,12 +6021,12 @@
.channels(channels)
.width(3)
.step(step)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINT, multipixel_with_output_stride) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINTF, multipixel_with_output_stride) {
for (size_t channels = 1; channels <= 5; channels += 1) {
DWConvMicrokernelTester()
.cr(1)
@@ -6034,11 +6034,11 @@
.channels(1)
.width(5)
.output_stride(7)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINT, multipixel_with_qmin) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINTF, multipixel_with_qmin) {
for (size_t channels = 1; channels <= 5; channels += 1) {
DWConvMicrokernelTester()
.cr(1)
@@ -6046,11 +6046,11 @@
.channels(channels)
.width(3)
.qmin(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINT, multipixel_with_qmax) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINTF, multipixel_with_qmax) {
for (size_t channels = 1; channels <= 5; channels += 1) {
DWConvMicrokernelTester()
.cr(1)
@@ -6058,11 +6058,11 @@
.channels(channels)
.width(3)
.qmax(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINT, input_zero_point_only) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINTF, input_zero_point_only) {
for (size_t channels = 1; channels <= 5; channels += 1) {
DWConvMicrokernelTester()
.cr(1)
@@ -6071,11 +6071,11 @@
.width(3)
.input_zero_point(255)
.kernel_zero_point(0)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINT, kernel_zero_point_only) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINTF, kernel_zero_point_only) {
for (size_t channels = 1; channels <= 5; channels += 1) {
DWConvMicrokernelTester()
.cr(1)
@@ -6084,22 +6084,22 @@
.width(3)
.input_zero_point(0)
.kernel_zero_point(255)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINT, input_offset) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINTF, input_offset) {
for (uint32_t channels = 2; channels < 16; channels += 3) {
DWConvMicrokernelTester()
.cr(1)
.kr(9)
.channels(channels)
.input_offset(48)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINT, zero) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_LRINTF, zero) {
for (uint32_t mz = 0; mz < 9; mz++) {
for (uint32_t channels = 2; channels < 16; channels += 3) {
DWConvMicrokernelTester()
@@ -6108,105 +6108,105 @@
.channels(channels)
.input_offset(48)
.zero_index(mz)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINT, c_eq_2) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINTF, c_eq_2) {
DWConvMicrokernelTester()
.cr(2)
.kr(9)
.channels(2)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINT, c_div_2) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINTF, c_div_2) {
for (uint32_t channels = 4; channels < 32; channels += 6) {
DWConvMicrokernelTester()
.cr(2)
.kr(9)
.channels(channels)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINT, c_div_2_with_qmin) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINTF, c_div_2_with_qmin) {
for (uint32_t channels = 4; channels < 32; channels += 6) {
DWConvMicrokernelTester()
.cr(2)
.kr(9)
.channels(channels)
.qmin(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINT, c_div_2_with_qmax) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINTF, c_div_2_with_qmax) {
for (uint32_t channels = 4; channels < 32; channels += 6) {
DWConvMicrokernelTester()
.cr(2)
.kr(9)
.channels(channels)
.qmax(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINT, c_lt_2) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINTF, c_lt_2) {
for (uint32_t channels = 1; channels < 2; channels++) {
DWConvMicrokernelTester()
.cr(2)
.kr(9)
.channels(channels)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINT, c_gt_2) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINTF, c_gt_2) {
for (uint32_t channels = 3; channels < 4; channels++) {
DWConvMicrokernelTester()
.cr(2)
.kr(9)
.channels(channels)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINT, c_gt_2_with_qmin) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINTF, c_gt_2_with_qmin) {
for (uint32_t channels = 3; channels < 4; channels++) {
DWConvMicrokernelTester()
.cr(2)
.kr(9)
.channels(channels)
.qmin(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINT, c_gt_2_with_qmax) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINTF, c_gt_2_with_qmax) {
for (uint32_t channels = 3; channels < 4; channels++) {
DWConvMicrokernelTester()
.cr(2)
.kr(9)
.channels(channels)
.qmax(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINT, multipixel) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINTF, multipixel) {
for (size_t channels = 1; channels <= 10; channels += 1) {
DWConvMicrokernelTester()
.cr(2)
.kr(9)
.channels(channels)
.width(3)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINT, multipixel_with_step) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINTF, multipixel_with_step) {
for (size_t channels = 1; channels <= 10; channels += 1) {
for (size_t step = 2; step <= 9; step++) {
DWConvMicrokernelTester()
@@ -6215,12 +6215,12 @@
.channels(channels)
.width(3)
.step(step)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINT, multipixel_with_output_stride) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINTF, multipixel_with_output_stride) {
for (size_t channels = 1; channels <= 10; channels += 1) {
DWConvMicrokernelTester()
.cr(2)
@@ -6228,11 +6228,11 @@
.channels(2)
.width(5)
.output_stride(13)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINT, multipixel_with_qmin) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINTF, multipixel_with_qmin) {
for (size_t channels = 1; channels <= 10; channels += 1) {
DWConvMicrokernelTester()
.cr(2)
@@ -6240,11 +6240,11 @@
.channels(channels)
.width(3)
.qmin(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINT, multipixel_with_qmax) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINTF, multipixel_with_qmax) {
for (size_t channels = 1; channels <= 10; channels += 1) {
DWConvMicrokernelTester()
.cr(2)
@@ -6252,11 +6252,11 @@
.channels(channels)
.width(3)
.qmax(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINT, input_zero_point_only) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINTF, input_zero_point_only) {
for (size_t channels = 1; channels <= 10; channels += 1) {
DWConvMicrokernelTester()
.cr(2)
@@ -6265,11 +6265,11 @@
.width(3)
.input_zero_point(255)
.kernel_zero_point(0)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINT, kernel_zero_point_only) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINTF, kernel_zero_point_only) {
for (size_t channels = 1; channels <= 10; channels += 1) {
DWConvMicrokernelTester()
.cr(2)
@@ -6278,22 +6278,22 @@
.width(3)
.input_zero_point(0)
.kernel_zero_point(255)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINT, input_offset) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINTF, input_offset) {
for (uint32_t channels = 4; channels < 32; channels += 6) {
DWConvMicrokernelTester()
.cr(2)
.kr(9)
.channels(channels)
.input_offset(80)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINT, zero) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_LRINTF, zero) {
for (uint32_t mz = 0; mz < 9; mz++) {
for (uint32_t channels = 4; channels < 32; channels += 6) {
DWConvMicrokernelTester()
@@ -6302,105 +6302,105 @@
.channels(channels)
.input_offset(80)
.zero_index(mz)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINT, c_eq_4) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINTF, c_eq_4) {
DWConvMicrokernelTester()
.cr(4)
.kr(9)
.channels(4)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINT, c_div_4) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINTF, c_div_4) {
for (uint32_t channels = 8; channels < 64; channels += 12) {
DWConvMicrokernelTester()
.cr(4)
.kr(9)
.channels(channels)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINT, c_div_4_with_qmin) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINTF, c_div_4_with_qmin) {
for (uint32_t channels = 8; channels < 64; channels += 12) {
DWConvMicrokernelTester()
.cr(4)
.kr(9)
.channels(channels)
.qmin(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINT, c_div_4_with_qmax) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINTF, c_div_4_with_qmax) {
for (uint32_t channels = 8; channels < 64; channels += 12) {
DWConvMicrokernelTester()
.cr(4)
.kr(9)
.channels(channels)
.qmax(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINT, c_lt_4) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINTF, c_lt_4) {
for (uint32_t channels = 1; channels < 4; channels++) {
DWConvMicrokernelTester()
.cr(4)
.kr(9)
.channels(channels)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINT, c_gt_4) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINTF, c_gt_4) {
for (uint32_t channels = 5; channels < 8; channels++) {
DWConvMicrokernelTester()
.cr(4)
.kr(9)
.channels(channels)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINT, c_gt_4_with_qmin) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINTF, c_gt_4_with_qmin) {
for (uint32_t channels = 5; channels < 8; channels++) {
DWConvMicrokernelTester()
.cr(4)
.kr(9)
.channels(channels)
.qmin(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINT, c_gt_4_with_qmax) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINTF, c_gt_4_with_qmax) {
for (uint32_t channels = 5; channels < 8; channels++) {
DWConvMicrokernelTester()
.cr(4)
.kr(9)
.channels(channels)
.qmax(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINT, multipixel) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINTF, multipixel) {
for (size_t channels = 1; channels <= 20; channels += 3) {
DWConvMicrokernelTester()
.cr(4)
.kr(9)
.channels(channels)
.width(3)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINT, multipixel_with_step) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINTF, multipixel_with_step) {
for (size_t channels = 1; channels <= 20; channels += 3) {
for (size_t step = 2; step <= 9; step++) {
DWConvMicrokernelTester()
@@ -6409,12 +6409,12 @@
.channels(channels)
.width(3)
.step(step)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINT, multipixel_with_output_stride) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINTF, multipixel_with_output_stride) {
for (size_t channels = 1; channels <= 20; channels += 3) {
DWConvMicrokernelTester()
.cr(4)
@@ -6422,11 +6422,11 @@
.channels(4)
.width(5)
.output_stride(23)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINT, multipixel_with_qmin) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINTF, multipixel_with_qmin) {
for (size_t channels = 1; channels <= 20; channels += 3) {
DWConvMicrokernelTester()
.cr(4)
@@ -6434,11 +6434,11 @@
.channels(channels)
.width(3)
.qmin(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINT, multipixel_with_qmax) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINTF, multipixel_with_qmax) {
for (size_t channels = 1; channels <= 20; channels += 3) {
DWConvMicrokernelTester()
.cr(4)
@@ -6446,11 +6446,11 @@
.channels(channels)
.width(3)
.qmax(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINT, input_zero_point_only) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINTF, input_zero_point_only) {
for (size_t channels = 1; channels <= 20; channels += 3) {
DWConvMicrokernelTester()
.cr(4)
@@ -6459,11 +6459,11 @@
.width(3)
.input_zero_point(255)
.kernel_zero_point(0)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINT, kernel_zero_point_only) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINTF, kernel_zero_point_only) {
for (size_t channels = 1; channels <= 20; channels += 3) {
DWConvMicrokernelTester()
.cr(4)
@@ -6472,22 +6472,22 @@
.width(3)
.input_zero_point(0)
.kernel_zero_point(255)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINT, input_offset) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINTF, input_offset) {
for (uint32_t channels = 8; channels < 64; channels += 12) {
DWConvMicrokernelTester()
.cr(4)
.kr(9)
.channels(channels)
.input_offset(112)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINT, zero) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_LRINTF, zero) {
for (uint32_t mz = 0; mz < 9; mz++) {
for (uint32_t channels = 8; channels < 64; channels += 12) {
DWConvMicrokernelTester()
@@ -6496,63 +6496,63 @@
.channels(channels)
.input_offset(112)
.zero_index(mz)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_MAGIC, c_eq_1) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_FMAGIC, c_eq_1) {
DWConvMicrokernelTester()
.cr(1)
.kr(9)
.channels(1)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_MAGIC, c_gt_1) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_FMAGIC, c_gt_1) {
for (uint32_t channels = 2; channels < 10; channels++) {
DWConvMicrokernelTester()
.cr(1)
.kr(9)
.channels(channels)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_MAGIC, c_gt_1_with_qmin) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_FMAGIC, c_gt_1_with_qmin) {
for (uint32_t channels = 2; channels < 10; channels++) {
DWConvMicrokernelTester()
.cr(1)
.kr(9)
.channels(channels)
.qmin(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_MAGIC, c_gt_1_with_qmax) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_FMAGIC, c_gt_1_with_qmax) {
for (uint32_t channels = 2; channels < 10; channels++) {
DWConvMicrokernelTester()
.cr(1)
.kr(9)
.channels(channels)
.qmax(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_MAGIC, multipixel) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_FMAGIC, multipixel) {
for (size_t channels = 1; channels <= 5; channels += 1) {
DWConvMicrokernelTester()
.cr(1)
.kr(9)
.channels(channels)
.width(3)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_MAGIC, multipixel_with_step) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_FMAGIC, multipixel_with_step) {
for (size_t channels = 1; channels <= 5; channels += 1) {
for (size_t step = 2; step <= 9; step++) {
DWConvMicrokernelTester()
@@ -6561,12 +6561,12 @@
.channels(channels)
.width(3)
.step(step)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_MAGIC, multipixel_with_output_stride) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_FMAGIC, multipixel_with_output_stride) {
for (size_t channels = 1; channels <= 5; channels += 1) {
DWConvMicrokernelTester()
.cr(1)
@@ -6574,11 +6574,11 @@
.channels(1)
.width(5)
.output_stride(7)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_MAGIC, multipixel_with_qmin) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_FMAGIC, multipixel_with_qmin) {
for (size_t channels = 1; channels <= 5; channels += 1) {
DWConvMicrokernelTester()
.cr(1)
@@ -6586,11 +6586,11 @@
.channels(channels)
.width(3)
.qmin(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_MAGIC, multipixel_with_qmax) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_FMAGIC, multipixel_with_qmax) {
for (size_t channels = 1; channels <= 5; channels += 1) {
DWConvMicrokernelTester()
.cr(1)
@@ -6598,11 +6598,11 @@
.channels(channels)
.width(3)
.qmax(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_MAGIC, input_zero_point_only) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_FMAGIC, input_zero_point_only) {
for (size_t channels = 1; channels <= 5; channels += 1) {
DWConvMicrokernelTester()
.cr(1)
@@ -6611,11 +6611,11 @@
.width(3)
.input_zero_point(255)
.kernel_zero_point(0)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_MAGIC, kernel_zero_point_only) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_FMAGIC, kernel_zero_point_only) {
for (size_t channels = 1; channels <= 5; channels += 1) {
DWConvMicrokernelTester()
.cr(1)
@@ -6624,22 +6624,22 @@
.width(3)
.input_zero_point(0)
.kernel_zero_point(255)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_MAGIC, input_offset) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_FMAGIC, input_offset) {
for (uint32_t channels = 2; channels < 16; channels += 3) {
DWConvMicrokernelTester()
.cr(1)
.kr(9)
.channels(channels)
.input_offset(48)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_MAGIC, zero) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP1X9__SCALAR_FMAGIC, zero) {
for (uint32_t mz = 0; mz < 9; mz++) {
for (uint32_t channels = 2; channels < 16; channels += 3) {
DWConvMicrokernelTester()
@@ -6648,105 +6648,105 @@
.channels(channels)
.input_offset(48)
.zero_index(mz)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_MAGIC, c_eq_2) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_FMAGIC, c_eq_2) {
DWConvMicrokernelTester()
.cr(2)
.kr(9)
.channels(2)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_MAGIC, c_div_2) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_FMAGIC, c_div_2) {
for (uint32_t channels = 4; channels < 32; channels += 6) {
DWConvMicrokernelTester()
.cr(2)
.kr(9)
.channels(channels)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_MAGIC, c_div_2_with_qmin) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_FMAGIC, c_div_2_with_qmin) {
for (uint32_t channels = 4; channels < 32; channels += 6) {
DWConvMicrokernelTester()
.cr(2)
.kr(9)
.channels(channels)
.qmin(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_MAGIC, c_div_2_with_qmax) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_FMAGIC, c_div_2_with_qmax) {
for (uint32_t channels = 4; channels < 32; channels += 6) {
DWConvMicrokernelTester()
.cr(2)
.kr(9)
.channels(channels)
.qmax(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_MAGIC, c_lt_2) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_FMAGIC, c_lt_2) {
for (uint32_t channels = 1; channels < 2; channels++) {
DWConvMicrokernelTester()
.cr(2)
.kr(9)
.channels(channels)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_MAGIC, c_gt_2) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_FMAGIC, c_gt_2) {
for (uint32_t channels = 3; channels < 4; channels++) {
DWConvMicrokernelTester()
.cr(2)
.kr(9)
.channels(channels)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_MAGIC, c_gt_2_with_qmin) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_FMAGIC, c_gt_2_with_qmin) {
for (uint32_t channels = 3; channels < 4; channels++) {
DWConvMicrokernelTester()
.cr(2)
.kr(9)
.channels(channels)
.qmin(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_MAGIC, c_gt_2_with_qmax) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_FMAGIC, c_gt_2_with_qmax) {
for (uint32_t channels = 3; channels < 4; channels++) {
DWConvMicrokernelTester()
.cr(2)
.kr(9)
.channels(channels)
.qmax(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_MAGIC, multipixel) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_FMAGIC, multipixel) {
for (size_t channels = 1; channels <= 10; channels += 1) {
DWConvMicrokernelTester()
.cr(2)
.kr(9)
.channels(channels)
.width(3)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_MAGIC, multipixel_with_step) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_FMAGIC, multipixel_with_step) {
for (size_t channels = 1; channels <= 10; channels += 1) {
for (size_t step = 2; step <= 9; step++) {
DWConvMicrokernelTester()
@@ -6755,12 +6755,12 @@
.channels(channels)
.width(3)
.step(step)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_MAGIC, multipixel_with_output_stride) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_FMAGIC, multipixel_with_output_stride) {
for (size_t channels = 1; channels <= 10; channels += 1) {
DWConvMicrokernelTester()
.cr(2)
@@ -6768,11 +6768,11 @@
.channels(2)
.width(5)
.output_stride(13)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_MAGIC, multipixel_with_qmin) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_FMAGIC, multipixel_with_qmin) {
for (size_t channels = 1; channels <= 10; channels += 1) {
DWConvMicrokernelTester()
.cr(2)
@@ -6780,11 +6780,11 @@
.channels(channels)
.width(3)
.qmin(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_MAGIC, multipixel_with_qmax) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_FMAGIC, multipixel_with_qmax) {
for (size_t channels = 1; channels <= 10; channels += 1) {
DWConvMicrokernelTester()
.cr(2)
@@ -6792,11 +6792,11 @@
.channels(channels)
.width(3)
.qmax(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_MAGIC, input_zero_point_only) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_FMAGIC, input_zero_point_only) {
for (size_t channels = 1; channels <= 10; channels += 1) {
DWConvMicrokernelTester()
.cr(2)
@@ -6805,11 +6805,11 @@
.width(3)
.input_zero_point(255)
.kernel_zero_point(0)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_MAGIC, kernel_zero_point_only) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_FMAGIC, kernel_zero_point_only) {
for (size_t channels = 1; channels <= 10; channels += 1) {
DWConvMicrokernelTester()
.cr(2)
@@ -6818,22 +6818,22 @@
.width(3)
.input_zero_point(0)
.kernel_zero_point(255)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_MAGIC, input_offset) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_FMAGIC, input_offset) {
for (uint32_t channels = 4; channels < 32; channels += 6) {
DWConvMicrokernelTester()
.cr(2)
.kr(9)
.channels(channels)
.input_offset(80)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_MAGIC, zero) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X9__SCALAR_FMAGIC, zero) {
for (uint32_t mz = 0; mz < 9; mz++) {
for (uint32_t channels = 4; channels < 32; channels += 6) {
DWConvMicrokernelTester()
@@ -6842,105 +6842,105 @@
.channels(channels)
.input_offset(80)
.zero_index(mz)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_MAGIC, c_eq_4) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_FMAGIC, c_eq_4) {
DWConvMicrokernelTester()
.cr(4)
.kr(9)
.channels(4)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_MAGIC, c_div_4) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_FMAGIC, c_div_4) {
for (uint32_t channels = 8; channels < 64; channels += 12) {
DWConvMicrokernelTester()
.cr(4)
.kr(9)
.channels(channels)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_MAGIC, c_div_4_with_qmin) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_FMAGIC, c_div_4_with_qmin) {
for (uint32_t channels = 8; channels < 64; channels += 12) {
DWConvMicrokernelTester()
.cr(4)
.kr(9)
.channels(channels)
.qmin(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_MAGIC, c_div_4_with_qmax) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_FMAGIC, c_div_4_with_qmax) {
for (uint32_t channels = 8; channels < 64; channels += 12) {
DWConvMicrokernelTester()
.cr(4)
.kr(9)
.channels(channels)
.qmax(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_MAGIC, c_lt_4) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_FMAGIC, c_lt_4) {
for (uint32_t channels = 1; channels < 4; channels++) {
DWConvMicrokernelTester()
.cr(4)
.kr(9)
.channels(channels)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_MAGIC, c_gt_4) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_FMAGIC, c_gt_4) {
for (uint32_t channels = 5; channels < 8; channels++) {
DWConvMicrokernelTester()
.cr(4)
.kr(9)
.channels(channels)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_MAGIC, c_gt_4_with_qmin) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_FMAGIC, c_gt_4_with_qmin) {
for (uint32_t channels = 5; channels < 8; channels++) {
DWConvMicrokernelTester()
.cr(4)
.kr(9)
.channels(channels)
.qmin(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_MAGIC, c_gt_4_with_qmax) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_FMAGIC, c_gt_4_with_qmax) {
for (uint32_t channels = 5; channels < 8; channels++) {
DWConvMicrokernelTester()
.cr(4)
.kr(9)
.channels(channels)
.qmax(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_MAGIC, multipixel) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_FMAGIC, multipixel) {
for (size_t channels = 1; channels <= 20; channels += 3) {
DWConvMicrokernelTester()
.cr(4)
.kr(9)
.channels(channels)
.width(3)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_MAGIC, multipixel_with_step) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_FMAGIC, multipixel_with_step) {
for (size_t channels = 1; channels <= 20; channels += 3) {
for (size_t step = 2; step <= 9; step++) {
DWConvMicrokernelTester()
@@ -6949,12 +6949,12 @@
.channels(channels)
.width(3)
.step(step)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_MAGIC, multipixel_with_output_stride) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_FMAGIC, multipixel_with_output_stride) {
for (size_t channels = 1; channels <= 20; channels += 3) {
DWConvMicrokernelTester()
.cr(4)
@@ -6962,11 +6962,11 @@
.channels(4)
.width(5)
.output_stride(23)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_MAGIC, multipixel_with_qmin) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_FMAGIC, multipixel_with_qmin) {
for (size_t channels = 1; channels <= 20; channels += 3) {
DWConvMicrokernelTester()
.cr(4)
@@ -6974,11 +6974,11 @@
.channels(channels)
.width(3)
.qmin(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_MAGIC, multipixel_with_qmax) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_FMAGIC, multipixel_with_qmax) {
for (size_t channels = 1; channels <= 20; channels += 3) {
DWConvMicrokernelTester()
.cr(4)
@@ -6986,11 +6986,11 @@
.channels(channels)
.width(3)
.qmax(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_MAGIC, input_zero_point_only) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_FMAGIC, input_zero_point_only) {
for (size_t channels = 1; channels <= 20; channels += 3) {
DWConvMicrokernelTester()
.cr(4)
@@ -6999,11 +6999,11 @@
.width(3)
.input_zero_point(255)
.kernel_zero_point(0)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_MAGIC, kernel_zero_point_only) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_FMAGIC, kernel_zero_point_only) {
for (size_t channels = 1; channels <= 20; channels += 3) {
DWConvMicrokernelTester()
.cr(4)
@@ -7012,22 +7012,22 @@
.width(3)
.input_zero_point(0)
.kernel_zero_point(255)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_MAGIC, input_offset) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_FMAGIC, input_offset) {
for (uint32_t channels = 8; channels < 64; channels += 12) {
DWConvMicrokernelTester()
.cr(4)
.kr(9)
.channels(channels)
.input_offset(112)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_MAGIC, zero) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X9__SCALAR_FMAGIC, zero) {
for (uint32_t mz = 0; mz < 9; mz++) {
for (uint32_t channels = 8; channels < 64; channels += 12) {
DWConvMicrokernelTester()
@@ -7036,7 +7036,7 @@
.channels(channels)
.input_offset(112)
.zero_index(mz)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
@@ -12982,58 +12982,58 @@
#endif // XNN_ARCH_WASMSIMD || XNN_ARCH_WASMRELAXEDSIMD
-TEST(QU8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINT, c_eq_1) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINTF, c_eq_1) {
DWConvMicrokernelTester()
.cr(1)
.kr(25)
.channels(1)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINT, c_gt_1) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINTF, c_gt_1) {
for (uint32_t channels = 2; channels < 10; channels++) {
DWConvMicrokernelTester()
.cr(1)
.kr(25)
.channels(channels)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINT, c_gt_1_with_qmin) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINTF, c_gt_1_with_qmin) {
for (uint32_t channels = 2; channels < 10; channels++) {
DWConvMicrokernelTester()
.cr(1)
.kr(25)
.channels(channels)
.qmin(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINT, c_gt_1_with_qmax) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINTF, c_gt_1_with_qmax) {
for (uint32_t channels = 2; channels < 10; channels++) {
DWConvMicrokernelTester()
.cr(1)
.kr(25)
.channels(channels)
.qmax(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINT, multipixel) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINTF, multipixel) {
for (size_t channels = 1; channels <= 5; channels += 1) {
DWConvMicrokernelTester()
.cr(1)
.kr(25)
.channels(channels)
.width(3)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINT, multipixel_with_step) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINTF, multipixel_with_step) {
for (size_t channels = 1; channels <= 5; channels += 1) {
for (size_t step = 2; step <= 25; step++) {
DWConvMicrokernelTester()
@@ -13042,12 +13042,12 @@
.channels(channels)
.width(3)
.step(step)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINT, multipixel_with_output_stride) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINTF, multipixel_with_output_stride) {
for (size_t channels = 1; channels <= 5; channels += 1) {
DWConvMicrokernelTester()
.cr(1)
@@ -13055,11 +13055,11 @@
.channels(1)
.width(5)
.output_stride(7)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINT, multipixel_with_qmin) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINTF, multipixel_with_qmin) {
for (size_t channels = 1; channels <= 5; channels += 1) {
DWConvMicrokernelTester()
.cr(1)
@@ -13067,11 +13067,11 @@
.channels(channels)
.width(3)
.qmin(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINT, multipixel_with_qmax) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINTF, multipixel_with_qmax) {
for (size_t channels = 1; channels <= 5; channels += 1) {
DWConvMicrokernelTester()
.cr(1)
@@ -13079,11 +13079,11 @@
.channels(channels)
.width(3)
.qmax(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINT, input_zero_point_only) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINTF, input_zero_point_only) {
for (size_t channels = 1; channels <= 5; channels += 1) {
DWConvMicrokernelTester()
.cr(1)
@@ -13092,11 +13092,11 @@
.width(3)
.input_zero_point(255)
.kernel_zero_point(0)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINT, kernel_zero_point_only) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINTF, kernel_zero_point_only) {
for (size_t channels = 1; channels <= 5; channels += 1) {
DWConvMicrokernelTester()
.cr(1)
@@ -13105,22 +13105,22 @@
.width(3)
.input_zero_point(0)
.kernel_zero_point(255)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINT, input_offset) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINTF, input_offset) {
for (uint32_t channels = 2; channels < 16; channels += 3) {
DWConvMicrokernelTester()
.cr(1)
.kr(25)
.channels(channels)
.input_offset(48)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINT, zero) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_LRINTF, zero) {
for (uint32_t mz = 0; mz < 25; mz++) {
for (uint32_t channels = 2; channels < 16; channels += 3) {
DWConvMicrokernelTester()
@@ -13129,105 +13129,105 @@
.channels(channels)
.input_offset(48)
.zero_index(mz)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINT, c_eq_2) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINTF, c_eq_2) {
DWConvMicrokernelTester()
.cr(2)
.kr(25)
.channels(2)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINT, c_div_2) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINTF, c_div_2) {
for (uint32_t channels = 4; channels < 32; channels += 6) {
DWConvMicrokernelTester()
.cr(2)
.kr(25)
.channels(channels)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINT, c_div_2_with_qmin) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINTF, c_div_2_with_qmin) {
for (uint32_t channels = 4; channels < 32; channels += 6) {
DWConvMicrokernelTester()
.cr(2)
.kr(25)
.channels(channels)
.qmin(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINT, c_div_2_with_qmax) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINTF, c_div_2_with_qmax) {
for (uint32_t channels = 4; channels < 32; channels += 6) {
DWConvMicrokernelTester()
.cr(2)
.kr(25)
.channels(channels)
.qmax(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINT, c_lt_2) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINTF, c_lt_2) {
for (uint32_t channels = 1; channels < 2; channels++) {
DWConvMicrokernelTester()
.cr(2)
.kr(25)
.channels(channels)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINT, c_gt_2) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINTF, c_gt_2) {
for (uint32_t channels = 3; channels < 4; channels++) {
DWConvMicrokernelTester()
.cr(2)
.kr(25)
.channels(channels)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINT, c_gt_2_with_qmin) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINTF, c_gt_2_with_qmin) {
for (uint32_t channels = 3; channels < 4; channels++) {
DWConvMicrokernelTester()
.cr(2)
.kr(25)
.channels(channels)
.qmin(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINT, c_gt_2_with_qmax) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINTF, c_gt_2_with_qmax) {
for (uint32_t channels = 3; channels < 4; channels++) {
DWConvMicrokernelTester()
.cr(2)
.kr(25)
.channels(channels)
.qmax(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINT, multipixel) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINTF, multipixel) {
for (size_t channels = 1; channels <= 10; channels += 1) {
DWConvMicrokernelTester()
.cr(2)
.kr(25)
.channels(channels)
.width(3)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINT, multipixel_with_step) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINTF, multipixel_with_step) {
for (size_t channels = 1; channels <= 10; channels += 1) {
for (size_t step = 2; step <= 25; step++) {
DWConvMicrokernelTester()
@@ -13236,12 +13236,12 @@
.channels(channels)
.width(3)
.step(step)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINT, multipixel_with_output_stride) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINTF, multipixel_with_output_stride) {
for (size_t channels = 1; channels <= 10; channels += 1) {
DWConvMicrokernelTester()
.cr(2)
@@ -13249,11 +13249,11 @@
.channels(2)
.width(5)
.output_stride(13)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINT, multipixel_with_qmin) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINTF, multipixel_with_qmin) {
for (size_t channels = 1; channels <= 10; channels += 1) {
DWConvMicrokernelTester()
.cr(2)
@@ -13261,11 +13261,11 @@
.channels(channels)
.width(3)
.qmin(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINT, multipixel_with_qmax) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINTF, multipixel_with_qmax) {
for (size_t channels = 1; channels <= 10; channels += 1) {
DWConvMicrokernelTester()
.cr(2)
@@ -13273,11 +13273,11 @@
.channels(channels)
.width(3)
.qmax(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINT, input_zero_point_only) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINTF, input_zero_point_only) {
for (size_t channels = 1; channels <= 10; channels += 1) {
DWConvMicrokernelTester()
.cr(2)
@@ -13286,11 +13286,11 @@
.width(3)
.input_zero_point(255)
.kernel_zero_point(0)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINT, kernel_zero_point_only) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINTF, kernel_zero_point_only) {
for (size_t channels = 1; channels <= 10; channels += 1) {
DWConvMicrokernelTester()
.cr(2)
@@ -13299,22 +13299,22 @@
.width(3)
.input_zero_point(0)
.kernel_zero_point(255)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINT, input_offset) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINTF, input_offset) {
for (uint32_t channels = 4; channels < 32; channels += 6) {
DWConvMicrokernelTester()
.cr(2)
.kr(25)
.channels(channels)
.input_offset(80)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINT, zero) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_LRINTF, zero) {
for (uint32_t mz = 0; mz < 25; mz++) {
for (uint32_t channels = 4; channels < 32; channels += 6) {
DWConvMicrokernelTester()
@@ -13323,105 +13323,105 @@
.channels(channels)
.input_offset(80)
.zero_index(mz)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINT, c_eq_4) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINTF, c_eq_4) {
DWConvMicrokernelTester()
.cr(4)
.kr(25)
.channels(4)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINT, c_div_4) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINTF, c_div_4) {
for (uint32_t channels = 8; channels < 64; channels += 12) {
DWConvMicrokernelTester()
.cr(4)
.kr(25)
.channels(channels)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINT, c_div_4_with_qmin) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINTF, c_div_4_with_qmin) {
for (uint32_t channels = 8; channels < 64; channels += 12) {
DWConvMicrokernelTester()
.cr(4)
.kr(25)
.channels(channels)
.qmin(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINT, c_div_4_with_qmax) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINTF, c_div_4_with_qmax) {
for (uint32_t channels = 8; channels < 64; channels += 12) {
DWConvMicrokernelTester()
.cr(4)
.kr(25)
.channels(channels)
.qmax(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINT, c_lt_4) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINTF, c_lt_4) {
for (uint32_t channels = 1; channels < 4; channels++) {
DWConvMicrokernelTester()
.cr(4)
.kr(25)
.channels(channels)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINT, c_gt_4) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINTF, c_gt_4) {
for (uint32_t channels = 5; channels < 8; channels++) {
DWConvMicrokernelTester()
.cr(4)
.kr(25)
.channels(channels)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINT, c_gt_4_with_qmin) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINTF, c_gt_4_with_qmin) {
for (uint32_t channels = 5; channels < 8; channels++) {
DWConvMicrokernelTester()
.cr(4)
.kr(25)
.channels(channels)
.qmin(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINT, c_gt_4_with_qmax) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINTF, c_gt_4_with_qmax) {
for (uint32_t channels = 5; channels < 8; channels++) {
DWConvMicrokernelTester()
.cr(4)
.kr(25)
.channels(channels)
.qmax(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINT, multipixel) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINTF, multipixel) {
for (size_t channels = 1; channels <= 20; channels += 3) {
DWConvMicrokernelTester()
.cr(4)
.kr(25)
.channels(channels)
.width(3)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINT, multipixel_with_step) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINTF, multipixel_with_step) {
for (size_t channels = 1; channels <= 20; channels += 3) {
for (size_t step = 2; step <= 25; step++) {
DWConvMicrokernelTester()
@@ -13430,12 +13430,12 @@
.channels(channels)
.width(3)
.step(step)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINT, multipixel_with_output_stride) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINTF, multipixel_with_output_stride) {
for (size_t channels = 1; channels <= 20; channels += 3) {
DWConvMicrokernelTester()
.cr(4)
@@ -13443,11 +13443,11 @@
.channels(4)
.width(5)
.output_stride(23)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINT, multipixel_with_qmin) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINTF, multipixel_with_qmin) {
for (size_t channels = 1; channels <= 20; channels += 3) {
DWConvMicrokernelTester()
.cr(4)
@@ -13455,11 +13455,11 @@
.channels(channels)
.width(3)
.qmin(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINT, multipixel_with_qmax) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINTF, multipixel_with_qmax) {
for (size_t channels = 1; channels <= 20; channels += 3) {
DWConvMicrokernelTester()
.cr(4)
@@ -13467,11 +13467,11 @@
.channels(channels)
.width(3)
.qmax(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINT, input_zero_point_only) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINTF, input_zero_point_only) {
for (size_t channels = 1; channels <= 20; channels += 3) {
DWConvMicrokernelTester()
.cr(4)
@@ -13480,11 +13480,11 @@
.width(3)
.input_zero_point(255)
.kernel_zero_point(0)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINT, kernel_zero_point_only) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINTF, kernel_zero_point_only) {
for (size_t channels = 1; channels <= 20; channels += 3) {
DWConvMicrokernelTester()
.cr(4)
@@ -13493,22 +13493,22 @@
.width(3)
.input_zero_point(0)
.kernel_zero_point(255)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINT, input_offset) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINTF, input_offset) {
for (uint32_t channels = 8; channels < 64; channels += 12) {
DWConvMicrokernelTester()
.cr(4)
.kr(25)
.channels(channels)
.input_offset(112)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINT, zero) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_LRINTF, zero) {
for (uint32_t mz = 0; mz < 25; mz++) {
for (uint32_t channels = 8; channels < 64; channels += 12) {
DWConvMicrokernelTester()
@@ -13517,63 +13517,63 @@
.channels(channels)
.input_offset(112)
.zero_index(mz)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_MAGIC, c_eq_1) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_FMAGIC, c_eq_1) {
DWConvMicrokernelTester()
.cr(1)
.kr(25)
.channels(1)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_MAGIC, c_gt_1) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_FMAGIC, c_gt_1) {
for (uint32_t channels = 2; channels < 10; channels++) {
DWConvMicrokernelTester()
.cr(1)
.kr(25)
.channels(channels)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_MAGIC, c_gt_1_with_qmin) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_FMAGIC, c_gt_1_with_qmin) {
for (uint32_t channels = 2; channels < 10; channels++) {
DWConvMicrokernelTester()
.cr(1)
.kr(25)
.channels(channels)
.qmin(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_MAGIC, c_gt_1_with_qmax) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_FMAGIC, c_gt_1_with_qmax) {
for (uint32_t channels = 2; channels < 10; channels++) {
DWConvMicrokernelTester()
.cr(1)
.kr(25)
.channels(channels)
.qmax(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_MAGIC, multipixel) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_FMAGIC, multipixel) {
for (size_t channels = 1; channels <= 5; channels += 1) {
DWConvMicrokernelTester()
.cr(1)
.kr(25)
.channels(channels)
.width(3)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_MAGIC, multipixel_with_step) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_FMAGIC, multipixel_with_step) {
for (size_t channels = 1; channels <= 5; channels += 1) {
for (size_t step = 2; step <= 25; step++) {
DWConvMicrokernelTester()
@@ -13582,12 +13582,12 @@
.channels(channels)
.width(3)
.step(step)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_MAGIC, multipixel_with_output_stride) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_FMAGIC, multipixel_with_output_stride) {
for (size_t channels = 1; channels <= 5; channels += 1) {
DWConvMicrokernelTester()
.cr(1)
@@ -13595,11 +13595,11 @@
.channels(1)
.width(5)
.output_stride(7)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_MAGIC, multipixel_with_qmin) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_FMAGIC, multipixel_with_qmin) {
for (size_t channels = 1; channels <= 5; channels += 1) {
DWConvMicrokernelTester()
.cr(1)
@@ -13607,11 +13607,11 @@
.channels(channels)
.width(3)
.qmin(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_MAGIC, multipixel_with_qmax) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_FMAGIC, multipixel_with_qmax) {
for (size_t channels = 1; channels <= 5; channels += 1) {
DWConvMicrokernelTester()
.cr(1)
@@ -13619,11 +13619,11 @@
.channels(channels)
.width(3)
.qmax(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_MAGIC, input_zero_point_only) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_FMAGIC, input_zero_point_only) {
for (size_t channels = 1; channels <= 5; channels += 1) {
DWConvMicrokernelTester()
.cr(1)
@@ -13632,11 +13632,11 @@
.width(3)
.input_zero_point(255)
.kernel_zero_point(0)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_MAGIC, kernel_zero_point_only) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_FMAGIC, kernel_zero_point_only) {
for (size_t channels = 1; channels <= 5; channels += 1) {
DWConvMicrokernelTester()
.cr(1)
@@ -13645,22 +13645,22 @@
.width(3)
.input_zero_point(0)
.kernel_zero_point(255)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_MAGIC, input_offset) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_FMAGIC, input_offset) {
for (uint32_t channels = 2; channels < 16; channels += 3) {
DWConvMicrokernelTester()
.cr(1)
.kr(25)
.channels(channels)
.input_offset(48)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_MAGIC, zero) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP1X25__SCALAR_FMAGIC, zero) {
for (uint32_t mz = 0; mz < 25; mz++) {
for (uint32_t channels = 2; channels < 16; channels += 3) {
DWConvMicrokernelTester()
@@ -13669,105 +13669,105 @@
.channels(channels)
.input_offset(48)
.zero_index(mz)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_MAGIC, c_eq_2) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_FMAGIC, c_eq_2) {
DWConvMicrokernelTester()
.cr(2)
.kr(25)
.channels(2)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_MAGIC, c_div_2) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_FMAGIC, c_div_2) {
for (uint32_t channels = 4; channels < 32; channels += 6) {
DWConvMicrokernelTester()
.cr(2)
.kr(25)
.channels(channels)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_MAGIC, c_div_2_with_qmin) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_FMAGIC, c_div_2_with_qmin) {
for (uint32_t channels = 4; channels < 32; channels += 6) {
DWConvMicrokernelTester()
.cr(2)
.kr(25)
.channels(channels)
.qmin(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_MAGIC, c_div_2_with_qmax) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_FMAGIC, c_div_2_with_qmax) {
for (uint32_t channels = 4; channels < 32; channels += 6) {
DWConvMicrokernelTester()
.cr(2)
.kr(25)
.channels(channels)
.qmax(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_MAGIC, c_lt_2) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_FMAGIC, c_lt_2) {
for (uint32_t channels = 1; channels < 2; channels++) {
DWConvMicrokernelTester()
.cr(2)
.kr(25)
.channels(channels)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_MAGIC, c_gt_2) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_FMAGIC, c_gt_2) {
for (uint32_t channels = 3; channels < 4; channels++) {
DWConvMicrokernelTester()
.cr(2)
.kr(25)
.channels(channels)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_MAGIC, c_gt_2_with_qmin) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_FMAGIC, c_gt_2_with_qmin) {
for (uint32_t channels = 3; channels < 4; channels++) {
DWConvMicrokernelTester()
.cr(2)
.kr(25)
.channels(channels)
.qmin(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_MAGIC, c_gt_2_with_qmax) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_FMAGIC, c_gt_2_with_qmax) {
for (uint32_t channels = 3; channels < 4; channels++) {
DWConvMicrokernelTester()
.cr(2)
.kr(25)
.channels(channels)
.qmax(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_MAGIC, multipixel) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_FMAGIC, multipixel) {
for (size_t channels = 1; channels <= 10; channels += 1) {
DWConvMicrokernelTester()
.cr(2)
.kr(25)
.channels(channels)
.width(3)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_MAGIC, multipixel_with_step) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_FMAGIC, multipixel_with_step) {
for (size_t channels = 1; channels <= 10; channels += 1) {
for (size_t step = 2; step <= 25; step++) {
DWConvMicrokernelTester()
@@ -13776,12 +13776,12 @@
.channels(channels)
.width(3)
.step(step)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_MAGIC, multipixel_with_output_stride) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_FMAGIC, multipixel_with_output_stride) {
for (size_t channels = 1; channels <= 10; channels += 1) {
DWConvMicrokernelTester()
.cr(2)
@@ -13789,11 +13789,11 @@
.channels(2)
.width(5)
.output_stride(13)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_MAGIC, multipixel_with_qmin) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_FMAGIC, multipixel_with_qmin) {
for (size_t channels = 1; channels <= 10; channels += 1) {
DWConvMicrokernelTester()
.cr(2)
@@ -13801,11 +13801,11 @@
.channels(channels)
.width(3)
.qmin(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_MAGIC, multipixel_with_qmax) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_FMAGIC, multipixel_with_qmax) {
for (size_t channels = 1; channels <= 10; channels += 1) {
DWConvMicrokernelTester()
.cr(2)
@@ -13813,11 +13813,11 @@
.channels(channels)
.width(3)
.qmax(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_MAGIC, input_zero_point_only) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_FMAGIC, input_zero_point_only) {
for (size_t channels = 1; channels <= 10; channels += 1) {
DWConvMicrokernelTester()
.cr(2)
@@ -13826,11 +13826,11 @@
.width(3)
.input_zero_point(255)
.kernel_zero_point(0)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_MAGIC, kernel_zero_point_only) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_FMAGIC, kernel_zero_point_only) {
for (size_t channels = 1; channels <= 10; channels += 1) {
DWConvMicrokernelTester()
.cr(2)
@@ -13839,22 +13839,22 @@
.width(3)
.input_zero_point(0)
.kernel_zero_point(255)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_MAGIC, input_offset) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_FMAGIC, input_offset) {
for (uint32_t channels = 4; channels < 32; channels += 6) {
DWConvMicrokernelTester()
.cr(2)
.kr(25)
.channels(channels)
.input_offset(80)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_MAGIC, zero) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP2X25__SCALAR_FMAGIC, zero) {
for (uint32_t mz = 0; mz < 25; mz++) {
for (uint32_t channels = 4; channels < 32; channels += 6) {
DWConvMicrokernelTester()
@@ -13863,105 +13863,105 @@
.channels(channels)
.input_offset(80)
.zero_index(mz)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_MAGIC, c_eq_4) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_FMAGIC, c_eq_4) {
DWConvMicrokernelTester()
.cr(4)
.kr(25)
.channels(4)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_MAGIC, c_div_4) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_FMAGIC, c_div_4) {
for (uint32_t channels = 8; channels < 64; channels += 12) {
DWConvMicrokernelTester()
.cr(4)
.kr(25)
.channels(channels)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_MAGIC, c_div_4_with_qmin) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_FMAGIC, c_div_4_with_qmin) {
for (uint32_t channels = 8; channels < 64; channels += 12) {
DWConvMicrokernelTester()
.cr(4)
.kr(25)
.channels(channels)
.qmin(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_MAGIC, c_div_4_with_qmax) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_FMAGIC, c_div_4_with_qmax) {
for (uint32_t channels = 8; channels < 64; channels += 12) {
DWConvMicrokernelTester()
.cr(4)
.kr(25)
.channels(channels)
.qmax(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_MAGIC, c_lt_4) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_FMAGIC, c_lt_4) {
for (uint32_t channels = 1; channels < 4; channels++) {
DWConvMicrokernelTester()
.cr(4)
.kr(25)
.channels(channels)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_MAGIC, c_gt_4) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_FMAGIC, c_gt_4) {
for (uint32_t channels = 5; channels < 8; channels++) {
DWConvMicrokernelTester()
.cr(4)
.kr(25)
.channels(channels)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_MAGIC, c_gt_4_with_qmin) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_FMAGIC, c_gt_4_with_qmin) {
for (uint32_t channels = 5; channels < 8; channels++) {
DWConvMicrokernelTester()
.cr(4)
.kr(25)
.channels(channels)
.qmin(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_MAGIC, c_gt_4_with_qmax) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_FMAGIC, c_gt_4_with_qmax) {
for (uint32_t channels = 5; channels < 8; channels++) {
DWConvMicrokernelTester()
.cr(4)
.kr(25)
.channels(channels)
.qmax(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_MAGIC, multipixel) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_FMAGIC, multipixel) {
for (size_t channels = 1; channels <= 20; channels += 3) {
DWConvMicrokernelTester()
.cr(4)
.kr(25)
.channels(channels)
.width(3)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_MAGIC, multipixel_with_step) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_FMAGIC, multipixel_with_step) {
for (size_t channels = 1; channels <= 20; channels += 3) {
for (size_t step = 2; step <= 25; step++) {
DWConvMicrokernelTester()
@@ -13970,12 +13970,12 @@
.channels(channels)
.width(3)
.step(step)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_MAGIC, multipixel_with_output_stride) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_FMAGIC, multipixel_with_output_stride) {
for (size_t channels = 1; channels <= 20; channels += 3) {
DWConvMicrokernelTester()
.cr(4)
@@ -13983,11 +13983,11 @@
.channels(4)
.width(5)
.output_stride(23)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_MAGIC, multipixel_with_qmin) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_FMAGIC, multipixel_with_qmin) {
for (size_t channels = 1; channels <= 20; channels += 3) {
DWConvMicrokernelTester()
.cr(4)
@@ -13995,11 +13995,11 @@
.channels(channels)
.width(3)
.qmin(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_MAGIC, multipixel_with_qmax) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_FMAGIC, multipixel_with_qmax) {
for (size_t channels = 1; channels <= 20; channels += 3) {
DWConvMicrokernelTester()
.cr(4)
@@ -14007,11 +14007,11 @@
.channels(channels)
.width(3)
.qmax(128)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_MAGIC, input_zero_point_only) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_FMAGIC, input_zero_point_only) {
for (size_t channels = 1; channels <= 20; channels += 3) {
DWConvMicrokernelTester()
.cr(4)
@@ -14020,11 +14020,11 @@
.width(3)
.input_zero_point(255)
.kernel_zero_point(0)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_MAGIC, kernel_zero_point_only) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_FMAGIC, kernel_zero_point_only) {
for (size_t channels = 1; channels <= 20; channels += 3) {
DWConvMicrokernelTester()
.cr(4)
@@ -14033,22 +14033,22 @@
.width(3)
.input_zero_point(0)
.kernel_zero_point(255)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_MAGIC, input_offset) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_FMAGIC, input_offset) {
for (uint32_t channels = 8; channels < 64; channels += 12) {
DWConvMicrokernelTester()
.cr(4)
.kr(25)
.channels(channels)
.input_offset(112)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_MAGIC, zero) {
+TEST(QU8_DWCONV_MINMAX_FP32_UP4X25__SCALAR_FMAGIC, zero) {
for (uint32_t mz = 0; mz < 25; mz++) {
for (uint32_t channels = 8; channels < 64; channels += 12) {
DWConvMicrokernelTester()
@@ -14057,7 +14057,7 @@
.channels(channels)
.input_offset(112)
.zero_index(mz)
- .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
\ No newline at end of file
diff --git a/test/qu8-dwconv-minmax-fp32.yaml b/test/qu8-dwconv-minmax-fp32.yaml
index f2c6075..9ac55fd 100644
--- a/test/qu8-dwconv-minmax-fp32.yaml
+++ b/test/qu8-dwconv-minmax-fp32.yaml
@@ -58,18 +58,18 @@
init: xnn_init_qu8_conv_minmax_fp32_wasmsimd_params
- name: xnn_qu8_dwconv_minmax_fp32_ukernel_up24x9__wasmsimd_mul16
init: xnn_init_qu8_conv_minmax_fp32_wasmsimd_params
-- name: xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrint
- init: xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params
-- name: xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrint
- init: xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params
-- name: xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrint
- init: xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params
-- name: xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_magic
- init: xnn_init_qu8_conv_minmax_fp32_scalar_magic_params
-- name: xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_magic
- init: xnn_init_qu8_conv_minmax_fp32_scalar_magic_params
-- name: xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_magic
- init: xnn_init_qu8_conv_minmax_fp32_scalar_magic_params
+- name: xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_lrintf
+ init: xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params
+- name: xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_lrintf
+ init: xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params
+- name: xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_lrintf
+ init: xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params
+- name: xnn_qu8_dwconv_minmax_fp32_ukernel_up1x9__scalar_fmagic
+ init: xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params
+- name: xnn_qu8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic
+ init: xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params
+- name: xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_fmagic
+ init: xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params
- name: xnn_qu8_dwconv_minmax_fp32_ukernel_up8x25__neon_mul16
init: xnn_init_qu8_conv_minmax_fp32_neon_params
- name: xnn_qu8_dwconv_minmax_fp32_ukernel_up16x25__neon_mul16
@@ -126,15 +126,15 @@
init: xnn_init_qu8_conv_minmax_fp32_wasmsimd_params
- name: xnn_qu8_dwconv_minmax_fp32_ukernel_up24x25__wasmsimd_mul16
init: xnn_init_qu8_conv_minmax_fp32_wasmsimd_params
-- name: xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrint
- init: xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params
-- name: xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrint
- init: xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params
-- name: xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrint
- init: xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params
-- name: xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_magic
- init: xnn_init_qu8_conv_minmax_fp32_scalar_magic_params
-- name: xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_magic
- init: xnn_init_qu8_conv_minmax_fp32_scalar_magic_params
-- name: xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_magic
- init: xnn_init_qu8_conv_minmax_fp32_scalar_magic_params
+- name: xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_lrintf
+ init: xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params
+- name: xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_lrintf
+ init: xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params
+- name: xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_lrintf
+ init: xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params
+- name: xnn_qu8_dwconv_minmax_fp32_ukernel_up1x25__scalar_fmagic
+ init: xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params
+- name: xnn_qu8_dwconv_minmax_fp32_ukernel_up2x25__scalar_fmagic
+ init: xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params
+- name: xnn_qu8_dwconv_minmax_fp32_ukernel_up4x25__scalar_fmagic
+ init: xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params
diff --git a/test/qu8-gemm-minmax-fp32.cc b/test/qu8-gemm-minmax-fp32.cc
index e691348..37c5a2f 100644
--- a/test/qu8-gemm-minmax-fp32.cc
+++ b/test/qu8-gemm-minmax-fp32.cc
@@ -47843,7 +47843,7 @@
#endif // XNN_ARCH_WASMSIMD || XNN_ARCH_WASMRELAXEDSIMD
-TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, k_eq_1) {
+TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, k_eq_1) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -47852,10 +47852,10 @@
.m(1)
.n(2)
.k(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, strided_cn) {
+TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, strided_cn) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -47865,10 +47865,10 @@
.n(2)
.k(1)
.cn_stride(5)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, k_eq_1_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, k_eq_1_strided_a) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -47878,10 +47878,10 @@
.n(2)
.k(1)
.a_stride(3)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, k_eq_1_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
@@ -47893,12 +47893,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, k_eq_1_subtile_m) {
+TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 1; m++) {
GemmMicrokernelTester()
.mr(1)
@@ -47909,11 +47909,11 @@
.n(2)
.k(1)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, k_eq_1_subtile_n) {
+TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
.mr(1)
@@ -47924,11 +47924,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, k_gt_1) {
+TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(1)
@@ -47938,11 +47938,11 @@
.m(1)
.n(2)
.k(k)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, k_gt_1_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, k_gt_1_strided_a) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(1)
@@ -47953,11 +47953,11 @@
.n(2)
.k(k)
.a_stride(11)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, k_gt_1_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -47970,13 +47970,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, n_gt_2) {
+TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, n_gt_2) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -47987,12 +47987,12 @@
.m(1)
.n(2)
.k(k)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, n_gt_2_strided_cn) {
+TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, n_gt_2_strided_cn) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -48004,12 +48004,12 @@
.n(2)
.k(k)
.cn_stride(5)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, n_gt_2_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, n_gt_2_strided_a) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -48021,12 +48021,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, n_gt_2_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, n_gt_2_subtile) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
@@ -48039,13 +48039,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, n_div_2) {
+TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, n_div_2) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -48056,12 +48056,12 @@
.m(1)
.n(2)
.k(k)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, n_div_2_strided_cn) {
+TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, n_div_2_strided_cn) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -48073,12 +48073,12 @@
.n(n)
.k(k)
.cn_stride(5)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, n_div_2_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, n_div_2_strided_a) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -48090,12 +48090,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, n_div_2_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, n_div_2_subtile) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
@@ -48108,13 +48108,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, strided_cm_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -48128,13 +48128,13 @@
.k(k)
.cm_stride(5)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, qmin) {
+TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, qmin) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -48144,10 +48144,10 @@
.n(2)
.k(1)
.qmin(128)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, qmax) {
+TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, qmax) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -48157,10 +48157,10 @@
.n(2)
.k(1)
.qmax(128)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, strided_cm) {
+TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, strided_cm) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -48170,10 +48170,10 @@
.n(2)
.k(1)
.cm_stride(5)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, no_a_zero_point) {
+TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, no_a_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(1)
@@ -48184,11 +48184,11 @@
.n(2)
.k(k)
.a_zero_point(0)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, no_b_zero_point) {
+TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, no_b_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(1)
@@ -48199,11 +48199,11 @@
.n(2)
.k(k)
.b_zero_point(0)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINT, no_zero_point) {
+TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, no_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(1)
@@ -48215,11 +48215,11 @@
.k(k)
.a_zero_point(0)
.b_zero_point(0)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, k_eq_1) {
+TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, k_eq_1) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -48228,10 +48228,10 @@
.m(2)
.n(2)
.k(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, strided_cn) {
+TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, strided_cn) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -48241,10 +48241,10 @@
.n(2)
.k(1)
.cn_stride(5)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, k_eq_1_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, k_eq_1_strided_a) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -48254,10 +48254,10 @@
.n(2)
.k(1)
.a_stride(3)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, k_eq_1_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
@@ -48269,12 +48269,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, k_eq_1_subtile_m) {
+TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 2; m++) {
GemmMicrokernelTester()
.mr(2)
@@ -48285,11 +48285,11 @@
.n(2)
.k(1)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, k_eq_1_subtile_n) {
+TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
.mr(2)
@@ -48300,11 +48300,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, k_gt_1) {
+TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(2)
@@ -48314,11 +48314,11 @@
.m(2)
.n(2)
.k(k)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, k_gt_1_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, k_gt_1_strided_a) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(2)
@@ -48329,11 +48329,11 @@
.n(2)
.k(k)
.a_stride(11)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, k_gt_1_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -48346,13 +48346,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, n_gt_2) {
+TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, n_gt_2) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -48363,12 +48363,12 @@
.m(2)
.n(2)
.k(k)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, n_gt_2_strided_cn) {
+TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, n_gt_2_strided_cn) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -48380,12 +48380,12 @@
.n(2)
.k(k)
.cn_stride(5)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, n_gt_2_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, n_gt_2_strided_a) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -48397,12 +48397,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, n_gt_2_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, n_gt_2_subtile) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
@@ -48415,13 +48415,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, n_div_2) {
+TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, n_div_2) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -48432,12 +48432,12 @@
.m(2)
.n(2)
.k(k)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, n_div_2_strided_cn) {
+TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, n_div_2_strided_cn) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -48449,12 +48449,12 @@
.n(n)
.k(k)
.cn_stride(5)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, n_div_2_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, n_div_2_strided_a) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -48466,12 +48466,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, n_div_2_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, n_div_2_subtile) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
@@ -48484,13 +48484,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, strided_cm_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -48504,13 +48504,13 @@
.k(k)
.cm_stride(5)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, qmin) {
+TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, qmin) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -48520,10 +48520,10 @@
.n(2)
.k(1)
.qmin(128)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, qmax) {
+TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, qmax) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -48533,10 +48533,10 @@
.n(2)
.k(1)
.qmax(128)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, strided_cm) {
+TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, strided_cm) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -48546,10 +48546,10 @@
.n(2)
.k(1)
.cm_stride(5)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, no_a_zero_point) {
+TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, no_a_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(2)
@@ -48560,11 +48560,11 @@
.n(2)
.k(k)
.a_zero_point(0)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, no_b_zero_point) {
+TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, no_b_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(2)
@@ -48575,11 +48575,11 @@
.n(2)
.k(k)
.b_zero_point(0)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINT, no_zero_point) {
+TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, no_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(2)
@@ -48591,11 +48591,11 @@
.k(k)
.a_zero_point(0)
.b_zero_point(0)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, k_eq_1) {
+TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, k_eq_1) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -48604,10 +48604,10 @@
.m(3)
.n(2)
.k(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, strided_cn) {
+TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, strided_cn) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -48617,10 +48617,10 @@
.n(2)
.k(1)
.cn_stride(5)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, k_eq_1_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, k_eq_1_strided_a) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -48630,10 +48630,10 @@
.n(2)
.k(1)
.a_stride(3)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, k_eq_1_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
@@ -48645,12 +48645,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, k_eq_1_subtile_m) {
+TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 3; m++) {
GemmMicrokernelTester()
.mr(3)
@@ -48661,11 +48661,11 @@
.n(2)
.k(1)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, k_eq_1_subtile_n) {
+TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
.mr(3)
@@ -48676,11 +48676,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, k_gt_1) {
+TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(3)
@@ -48690,11 +48690,11 @@
.m(3)
.n(2)
.k(k)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, k_gt_1_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, k_gt_1_strided_a) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(3)
@@ -48705,11 +48705,11 @@
.n(2)
.k(k)
.a_stride(11)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, k_gt_1_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -48722,13 +48722,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, n_gt_2) {
+TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, n_gt_2) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -48739,12 +48739,12 @@
.m(3)
.n(2)
.k(k)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, n_gt_2_strided_cn) {
+TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, n_gt_2_strided_cn) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -48756,12 +48756,12 @@
.n(2)
.k(k)
.cn_stride(5)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, n_gt_2_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, n_gt_2_strided_a) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -48773,12 +48773,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, n_gt_2_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, n_gt_2_subtile) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
@@ -48791,13 +48791,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, n_div_2) {
+TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, n_div_2) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -48808,12 +48808,12 @@
.m(3)
.n(2)
.k(k)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, n_div_2_strided_cn) {
+TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, n_div_2_strided_cn) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -48825,12 +48825,12 @@
.n(n)
.k(k)
.cn_stride(5)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, n_div_2_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, n_div_2_strided_a) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -48842,12 +48842,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, n_div_2_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, n_div_2_subtile) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
@@ -48860,13 +48860,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, strided_cm_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -48880,13 +48880,13 @@
.k(k)
.cm_stride(5)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, qmin) {
+TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, qmin) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -48896,10 +48896,10 @@
.n(2)
.k(1)
.qmin(128)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, qmax) {
+TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, qmax) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -48909,10 +48909,10 @@
.n(2)
.k(1)
.qmax(128)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, strided_cm) {
+TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, strided_cm) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -48922,10 +48922,10 @@
.n(2)
.k(1)
.cm_stride(5)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, no_a_zero_point) {
+TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, no_a_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(3)
@@ -48936,11 +48936,11 @@
.n(2)
.k(k)
.a_zero_point(0)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, no_b_zero_point) {
+TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, no_b_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(3)
@@ -48951,11 +48951,11 @@
.n(2)
.k(k)
.b_zero_point(0)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINT, no_zero_point) {
+TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, no_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(3)
@@ -48967,11 +48967,11 @@
.k(k)
.a_zero_point(0)
.b_zero_point(0)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, k_eq_1) {
+TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, k_eq_1) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -48980,10 +48980,10 @@
.m(4)
.n(2)
.k(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, strided_cn) {
+TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, strided_cn) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -48993,10 +48993,10 @@
.n(2)
.k(1)
.cn_stride(5)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, k_eq_1_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, k_eq_1_strided_a) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -49006,10 +49006,10 @@
.n(2)
.k(1)
.a_stride(3)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, k_eq_1_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
@@ -49021,12 +49021,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, k_eq_1_subtile_m) {
+TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 4; m++) {
GemmMicrokernelTester()
.mr(4)
@@ -49037,11 +49037,11 @@
.n(2)
.k(1)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, k_eq_1_subtile_n) {
+TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
.mr(4)
@@ -49052,11 +49052,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, k_gt_1) {
+TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(4)
@@ -49066,11 +49066,11 @@
.m(4)
.n(2)
.k(k)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, k_gt_1_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, k_gt_1_strided_a) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(4)
@@ -49081,11 +49081,11 @@
.n(2)
.k(k)
.a_stride(11)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, k_gt_1_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -49098,13 +49098,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, n_gt_2) {
+TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, n_gt_2) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -49115,12 +49115,12 @@
.m(4)
.n(2)
.k(k)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, n_gt_2_strided_cn) {
+TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, n_gt_2_strided_cn) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -49132,12 +49132,12 @@
.n(2)
.k(k)
.cn_stride(5)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, n_gt_2_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, n_gt_2_strided_a) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -49149,12 +49149,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, n_gt_2_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, n_gt_2_subtile) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
@@ -49167,13 +49167,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, n_div_2) {
+TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, n_div_2) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -49184,12 +49184,12 @@
.m(4)
.n(2)
.k(k)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, n_div_2_strided_cn) {
+TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, n_div_2_strided_cn) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -49201,12 +49201,12 @@
.n(n)
.k(k)
.cn_stride(5)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, n_div_2_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, n_div_2_strided_a) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -49218,12 +49218,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, n_div_2_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, n_div_2_subtile) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
@@ -49236,13 +49236,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, strided_cm_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -49256,13 +49256,13 @@
.k(k)
.cm_stride(5)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, qmin) {
+TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, qmin) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -49272,10 +49272,10 @@
.n(2)
.k(1)
.qmin(128)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, qmax) {
+TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, qmax) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -49285,10 +49285,10 @@
.n(2)
.k(1)
.qmax(128)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, strided_cm) {
+TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, strided_cm) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -49298,10 +49298,10 @@
.n(2)
.k(1)
.cm_stride(5)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, no_a_zero_point) {
+TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, no_a_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(4)
@@ -49312,11 +49312,11 @@
.n(2)
.k(k)
.a_zero_point(0)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, no_b_zero_point) {
+TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, no_b_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(4)
@@ -49327,11 +49327,11 @@
.n(2)
.k(k)
.b_zero_point(0)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINT, no_zero_point) {
+TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, no_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(4)
@@ -49343,11 +49343,11 @@
.k(k)
.a_zero_point(0)
.b_zero_point(0)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, k_eq_1) {
+TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, k_eq_1) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -49356,10 +49356,10 @@
.m(1)
.n(4)
.k(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, strided_cn) {
+TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, strided_cn) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -49369,10 +49369,10 @@
.n(4)
.k(1)
.cn_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, k_eq_1_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, k_eq_1_strided_a) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -49382,10 +49382,10 @@
.n(4)
.k(1)
.a_stride(3)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, k_eq_1_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
@@ -49397,12 +49397,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, k_eq_1_subtile_m) {
+TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 1; m++) {
GemmMicrokernelTester()
.mr(1)
@@ -49413,11 +49413,11 @@
.n(4)
.k(1)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, k_eq_1_subtile_n) {
+TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
.mr(1)
@@ -49428,11 +49428,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, k_gt_1) {
+TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(1)
@@ -49442,11 +49442,11 @@
.m(1)
.n(4)
.k(k)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, k_gt_1_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, k_gt_1_strided_a) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(1)
@@ -49457,11 +49457,11 @@
.n(4)
.k(k)
.a_stride(11)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, k_gt_1_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -49474,13 +49474,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, n_gt_4) {
+TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, n_gt_4) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -49491,12 +49491,12 @@
.m(1)
.n(4)
.k(k)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, n_gt_4_strided_cn) {
+TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, n_gt_4_strided_cn) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -49508,12 +49508,12 @@
.n(4)
.k(k)
.cn_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, n_gt_4_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, n_gt_4_strided_a) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -49525,12 +49525,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, n_gt_4_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, n_gt_4_subtile) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
@@ -49543,13 +49543,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, n_div_4) {
+TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, n_div_4) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -49560,12 +49560,12 @@
.m(1)
.n(4)
.k(k)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, n_div_4_strided_cn) {
+TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, n_div_4_strided_cn) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -49577,12 +49577,12 @@
.n(n)
.k(k)
.cn_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, n_div_4_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, n_div_4_strided_a) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -49594,12 +49594,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, n_div_4_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, n_div_4_subtile) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
@@ -49612,13 +49612,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, strided_cm_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -49632,13 +49632,13 @@
.k(k)
.cm_stride(7)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, qmin) {
+TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, qmin) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -49648,10 +49648,10 @@
.n(4)
.k(1)
.qmin(128)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, qmax) {
+TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, qmax) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -49661,10 +49661,10 @@
.n(4)
.k(1)
.qmax(128)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, strided_cm) {
+TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, strided_cm) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -49674,10 +49674,10 @@
.n(4)
.k(1)
.cm_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, no_a_zero_point) {
+TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, no_a_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(1)
@@ -49688,11 +49688,11 @@
.n(4)
.k(k)
.a_zero_point(0)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, no_b_zero_point) {
+TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, no_b_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(1)
@@ -49703,11 +49703,11 @@
.n(4)
.k(k)
.b_zero_point(0)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINT, no_zero_point) {
+TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, no_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(1)
@@ -49719,11 +49719,11 @@
.k(k)
.a_zero_point(0)
.b_zero_point(0)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, k_eq_1) {
+TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, k_eq_1) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -49732,10 +49732,10 @@
.m(2)
.n(4)
.k(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, strided_cn) {
+TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, strided_cn) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -49745,10 +49745,10 @@
.n(4)
.k(1)
.cn_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, k_eq_1_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, k_eq_1_strided_a) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -49758,10 +49758,10 @@
.n(4)
.k(1)
.a_stride(3)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, k_eq_1_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
@@ -49773,12 +49773,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, k_eq_1_subtile_m) {
+TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 2; m++) {
GemmMicrokernelTester()
.mr(2)
@@ -49789,11 +49789,11 @@
.n(4)
.k(1)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, k_eq_1_subtile_n) {
+TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
.mr(2)
@@ -49804,11 +49804,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, k_gt_1) {
+TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(2)
@@ -49818,11 +49818,11 @@
.m(2)
.n(4)
.k(k)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, k_gt_1_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, k_gt_1_strided_a) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(2)
@@ -49833,11 +49833,11 @@
.n(4)
.k(k)
.a_stride(11)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, k_gt_1_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -49850,13 +49850,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, n_gt_4) {
+TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, n_gt_4) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -49867,12 +49867,12 @@
.m(2)
.n(4)
.k(k)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, n_gt_4_strided_cn) {
+TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, n_gt_4_strided_cn) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -49884,12 +49884,12 @@
.n(4)
.k(k)
.cn_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, n_gt_4_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, n_gt_4_strided_a) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -49901,12 +49901,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, n_gt_4_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, n_gt_4_subtile) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
@@ -49919,13 +49919,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, n_div_4) {
+TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, n_div_4) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -49936,12 +49936,12 @@
.m(2)
.n(4)
.k(k)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, n_div_4_strided_cn) {
+TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, n_div_4_strided_cn) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -49953,12 +49953,12 @@
.n(n)
.k(k)
.cn_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, n_div_4_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, n_div_4_strided_a) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -49970,12 +49970,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, n_div_4_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, n_div_4_subtile) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
@@ -49988,13 +49988,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, strided_cm_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -50008,13 +50008,13 @@
.k(k)
.cm_stride(7)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, qmin) {
+TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, qmin) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -50024,10 +50024,10 @@
.n(4)
.k(1)
.qmin(128)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, qmax) {
+TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, qmax) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -50037,10 +50037,10 @@
.n(4)
.k(1)
.qmax(128)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, strided_cm) {
+TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, strided_cm) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -50050,10 +50050,10 @@
.n(4)
.k(1)
.cm_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, no_a_zero_point) {
+TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, no_a_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(2)
@@ -50064,11 +50064,11 @@
.n(4)
.k(k)
.a_zero_point(0)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, no_b_zero_point) {
+TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, no_b_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(2)
@@ -50079,11 +50079,11 @@
.n(4)
.k(k)
.b_zero_point(0)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINT, no_zero_point) {
+TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, no_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(2)
@@ -50095,11 +50095,11 @@
.k(k)
.a_zero_point(0)
.b_zero_point(0)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, k_eq_1) {
+TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, k_eq_1) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -50108,10 +50108,10 @@
.m(3)
.n(4)
.k(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, strided_cn) {
+TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, strided_cn) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -50121,10 +50121,10 @@
.n(4)
.k(1)
.cn_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, k_eq_1_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, k_eq_1_strided_a) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -50134,10 +50134,10 @@
.n(4)
.k(1)
.a_stride(3)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, k_eq_1_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
@@ -50149,12 +50149,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, k_eq_1_subtile_m) {
+TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 3; m++) {
GemmMicrokernelTester()
.mr(3)
@@ -50165,11 +50165,11 @@
.n(4)
.k(1)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, k_eq_1_subtile_n) {
+TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
.mr(3)
@@ -50180,11 +50180,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, k_gt_1) {
+TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(3)
@@ -50194,11 +50194,11 @@
.m(3)
.n(4)
.k(k)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, k_gt_1_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, k_gt_1_strided_a) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(3)
@@ -50209,11 +50209,11 @@
.n(4)
.k(k)
.a_stride(11)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, k_gt_1_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -50226,13 +50226,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, n_gt_4) {
+TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, n_gt_4) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -50243,12 +50243,12 @@
.m(3)
.n(4)
.k(k)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, n_gt_4_strided_cn) {
+TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, n_gt_4_strided_cn) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -50260,12 +50260,12 @@
.n(4)
.k(k)
.cn_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, n_gt_4_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, n_gt_4_strided_a) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -50277,12 +50277,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, n_gt_4_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, n_gt_4_subtile) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
@@ -50295,13 +50295,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, n_div_4) {
+TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, n_div_4) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -50312,12 +50312,12 @@
.m(3)
.n(4)
.k(k)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, n_div_4_strided_cn) {
+TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, n_div_4_strided_cn) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -50329,12 +50329,12 @@
.n(n)
.k(k)
.cn_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, n_div_4_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, n_div_4_strided_a) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -50346,12 +50346,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, n_div_4_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, n_div_4_subtile) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
@@ -50364,13 +50364,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, strided_cm_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -50384,13 +50384,13 @@
.k(k)
.cm_stride(7)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, qmin) {
+TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, qmin) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -50400,10 +50400,10 @@
.n(4)
.k(1)
.qmin(128)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, qmax) {
+TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, qmax) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -50413,10 +50413,10 @@
.n(4)
.k(1)
.qmax(128)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, strided_cm) {
+TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, strided_cm) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -50426,10 +50426,10 @@
.n(4)
.k(1)
.cm_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, no_a_zero_point) {
+TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, no_a_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(3)
@@ -50440,11 +50440,11 @@
.n(4)
.k(k)
.a_zero_point(0)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, no_b_zero_point) {
+TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, no_b_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(3)
@@ -50455,11 +50455,11 @@
.n(4)
.k(k)
.b_zero_point(0)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINT, no_zero_point) {
+TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, no_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(3)
@@ -50471,11 +50471,11 @@
.k(k)
.a_zero_point(0)
.b_zero_point(0)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, k_eq_1) {
+TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, k_eq_1) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -50484,10 +50484,10 @@
.m(4)
.n(4)
.k(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, strided_cn) {
+TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, strided_cn) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -50497,10 +50497,10 @@
.n(4)
.k(1)
.cn_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, k_eq_1_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, k_eq_1_strided_a) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -50510,10 +50510,10 @@
.n(4)
.k(1)
.a_stride(3)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, k_eq_1_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
@@ -50525,12 +50525,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, k_eq_1_subtile_m) {
+TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 4; m++) {
GemmMicrokernelTester()
.mr(4)
@@ -50541,11 +50541,11 @@
.n(4)
.k(1)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, k_eq_1_subtile_n) {
+TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
.mr(4)
@@ -50556,11 +50556,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, k_gt_1) {
+TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(4)
@@ -50570,11 +50570,11 @@
.m(4)
.n(4)
.k(k)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, k_gt_1_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, k_gt_1_strided_a) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(4)
@@ -50585,11 +50585,11 @@
.n(4)
.k(k)
.a_stride(11)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, k_gt_1_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -50602,13 +50602,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, n_gt_4) {
+TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, n_gt_4) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -50619,12 +50619,12 @@
.m(4)
.n(4)
.k(k)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, n_gt_4_strided_cn) {
+TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, n_gt_4_strided_cn) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -50636,12 +50636,12 @@
.n(4)
.k(k)
.cn_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, n_gt_4_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, n_gt_4_strided_a) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -50653,12 +50653,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, n_gt_4_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, n_gt_4_subtile) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
@@ -50671,13 +50671,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, n_div_4) {
+TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, n_div_4) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -50688,12 +50688,12 @@
.m(4)
.n(4)
.k(k)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, n_div_4_strided_cn) {
+TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, n_div_4_strided_cn) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -50705,12 +50705,12 @@
.n(n)
.k(k)
.cn_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, n_div_4_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, n_div_4_strided_a) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -50722,12 +50722,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, n_div_4_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, n_div_4_subtile) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
@@ -50740,13 +50740,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, strided_cm_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -50760,13 +50760,13 @@
.k(k)
.cm_stride(7)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, qmin) {
+TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, qmin) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -50776,10 +50776,10 @@
.n(4)
.k(1)
.qmin(128)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, qmax) {
+TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, qmax) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -50789,10 +50789,10 @@
.n(4)
.k(1)
.qmax(128)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, strided_cm) {
+TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, strided_cm) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -50802,10 +50802,10 @@
.n(4)
.k(1)
.cm_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, no_a_zero_point) {
+TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, no_a_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(4)
@@ -50816,11 +50816,11 @@
.n(4)
.k(k)
.a_zero_point(0)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, no_b_zero_point) {
+TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, no_b_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(4)
@@ -50831,11 +50831,11 @@
.n(4)
.k(k)
.b_zero_point(0)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINT, no_zero_point) {
+TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, no_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(4)
@@ -50847,11 +50847,11 @@
.k(k)
.a_zero_point(0)
.b_zero_point(0)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, k_eq_1) {
+TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, k_eq_1) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -50860,10 +50860,10 @@
.m(1)
.n(2)
.k(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, strided_cn) {
+TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, strided_cn) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -50873,10 +50873,10 @@
.n(2)
.k(1)
.cn_stride(5)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, k_eq_1_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, k_eq_1_strided_a) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -50886,10 +50886,10 @@
.n(2)
.k(1)
.a_stride(3)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, k_eq_1_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
@@ -50901,12 +50901,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, k_eq_1_subtile_m) {
+TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 1; m++) {
GemmMicrokernelTester()
.mr(1)
@@ -50917,11 +50917,11 @@
.n(2)
.k(1)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, k_eq_1_subtile_n) {
+TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
.mr(1)
@@ -50932,11 +50932,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, k_gt_1) {
+TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(1)
@@ -50946,11 +50946,11 @@
.m(1)
.n(2)
.k(k)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, k_gt_1_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, k_gt_1_strided_a) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(1)
@@ -50961,11 +50961,11 @@
.n(2)
.k(k)
.a_stride(11)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, k_gt_1_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -50978,13 +50978,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, n_gt_2) {
+TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, n_gt_2) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -50995,12 +50995,12 @@
.m(1)
.n(2)
.k(k)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, n_gt_2_strided_cn) {
+TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, n_gt_2_strided_cn) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -51012,12 +51012,12 @@
.n(2)
.k(k)
.cn_stride(5)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, n_gt_2_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, n_gt_2_strided_a) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -51029,12 +51029,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, n_gt_2_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, n_gt_2_subtile) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
@@ -51047,13 +51047,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, n_div_2) {
+TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, n_div_2) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -51064,12 +51064,12 @@
.m(1)
.n(2)
.k(k)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, n_div_2_strided_cn) {
+TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, n_div_2_strided_cn) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -51081,12 +51081,12 @@
.n(n)
.k(k)
.cn_stride(5)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, n_div_2_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, n_div_2_strided_a) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -51098,12 +51098,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, n_div_2_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, n_div_2_subtile) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
@@ -51116,13 +51116,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, strided_cm_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -51136,13 +51136,13 @@
.k(k)
.cm_stride(5)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, qmin) {
+TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, qmin) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -51152,10 +51152,10 @@
.n(2)
.k(1)
.qmin(128)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, qmax) {
+TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, qmax) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -51165,10 +51165,10 @@
.n(2)
.k(1)
.qmax(128)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, strided_cm) {
+TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, strided_cm) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -51178,10 +51178,10 @@
.n(2)
.k(1)
.cm_stride(5)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, no_a_zero_point) {
+TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, no_a_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(1)
@@ -51192,11 +51192,11 @@
.n(2)
.k(k)
.a_zero_point(0)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, no_b_zero_point) {
+TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, no_b_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(1)
@@ -51207,11 +51207,11 @@
.n(2)
.k(k)
.b_zero_point(0)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, no_zero_point) {
+TEST(QU8_GEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, no_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(1)
@@ -51223,11 +51223,11 @@
.k(k)
.a_zero_point(0)
.b_zero_point(0)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, k_eq_1) {
+TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, k_eq_1) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -51236,10 +51236,10 @@
.m(2)
.n(2)
.k(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, strided_cn) {
+TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, strided_cn) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -51249,10 +51249,10 @@
.n(2)
.k(1)
.cn_stride(5)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, k_eq_1_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, k_eq_1_strided_a) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -51262,10 +51262,10 @@
.n(2)
.k(1)
.a_stride(3)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, k_eq_1_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
@@ -51277,12 +51277,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, k_eq_1_subtile_m) {
+TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 2; m++) {
GemmMicrokernelTester()
.mr(2)
@@ -51293,11 +51293,11 @@
.n(2)
.k(1)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, k_eq_1_subtile_n) {
+TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
.mr(2)
@@ -51308,11 +51308,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, k_gt_1) {
+TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(2)
@@ -51322,11 +51322,11 @@
.m(2)
.n(2)
.k(k)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, k_gt_1_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, k_gt_1_strided_a) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(2)
@@ -51337,11 +51337,11 @@
.n(2)
.k(k)
.a_stride(11)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, k_gt_1_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -51354,13 +51354,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, n_gt_2) {
+TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, n_gt_2) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -51371,12 +51371,12 @@
.m(2)
.n(2)
.k(k)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, n_gt_2_strided_cn) {
+TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, n_gt_2_strided_cn) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -51388,12 +51388,12 @@
.n(2)
.k(k)
.cn_stride(5)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, n_gt_2_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, n_gt_2_strided_a) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -51405,12 +51405,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, n_gt_2_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, n_gt_2_subtile) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
@@ -51423,13 +51423,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, n_div_2) {
+TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, n_div_2) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -51440,12 +51440,12 @@
.m(2)
.n(2)
.k(k)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, n_div_2_strided_cn) {
+TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, n_div_2_strided_cn) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -51457,12 +51457,12 @@
.n(n)
.k(k)
.cn_stride(5)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, n_div_2_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, n_div_2_strided_a) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -51474,12 +51474,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, n_div_2_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, n_div_2_subtile) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
@@ -51492,13 +51492,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, strided_cm_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -51512,13 +51512,13 @@
.k(k)
.cm_stride(5)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, qmin) {
+TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, qmin) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -51528,10 +51528,10 @@
.n(2)
.k(1)
.qmin(128)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, qmax) {
+TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, qmax) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -51541,10 +51541,10 @@
.n(2)
.k(1)
.qmax(128)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, strided_cm) {
+TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, strided_cm) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -51554,10 +51554,10 @@
.n(2)
.k(1)
.cm_stride(5)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, no_a_zero_point) {
+TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, no_a_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(2)
@@ -51568,11 +51568,11 @@
.n(2)
.k(k)
.a_zero_point(0)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, no_b_zero_point) {
+TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, no_b_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(2)
@@ -51583,11 +51583,11 @@
.n(2)
.k(k)
.b_zero_point(0)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, no_zero_point) {
+TEST(QU8_GEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, no_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(2)
@@ -51599,11 +51599,11 @@
.k(k)
.a_zero_point(0)
.b_zero_point(0)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, k_eq_1) {
+TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, k_eq_1) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -51612,10 +51612,10 @@
.m(3)
.n(2)
.k(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, strided_cn) {
+TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, strided_cn) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -51625,10 +51625,10 @@
.n(2)
.k(1)
.cn_stride(5)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, k_eq_1_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, k_eq_1_strided_a) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -51638,10 +51638,10 @@
.n(2)
.k(1)
.a_stride(3)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, k_eq_1_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
@@ -51653,12 +51653,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, k_eq_1_subtile_m) {
+TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 3; m++) {
GemmMicrokernelTester()
.mr(3)
@@ -51669,11 +51669,11 @@
.n(2)
.k(1)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, k_eq_1_subtile_n) {
+TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
.mr(3)
@@ -51684,11 +51684,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, k_gt_1) {
+TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(3)
@@ -51698,11 +51698,11 @@
.m(3)
.n(2)
.k(k)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, k_gt_1_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, k_gt_1_strided_a) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(3)
@@ -51713,11 +51713,11 @@
.n(2)
.k(k)
.a_stride(11)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, k_gt_1_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -51730,13 +51730,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, n_gt_2) {
+TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, n_gt_2) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -51747,12 +51747,12 @@
.m(3)
.n(2)
.k(k)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, n_gt_2_strided_cn) {
+TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, n_gt_2_strided_cn) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -51764,12 +51764,12 @@
.n(2)
.k(k)
.cn_stride(5)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, n_gt_2_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, n_gt_2_strided_a) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -51781,12 +51781,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, n_gt_2_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, n_gt_2_subtile) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
@@ -51799,13 +51799,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, n_div_2) {
+TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, n_div_2) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -51816,12 +51816,12 @@
.m(3)
.n(2)
.k(k)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, n_div_2_strided_cn) {
+TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, n_div_2_strided_cn) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -51833,12 +51833,12 @@
.n(n)
.k(k)
.cn_stride(5)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, n_div_2_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, n_div_2_strided_a) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -51850,12 +51850,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, n_div_2_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, n_div_2_subtile) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
@@ -51868,13 +51868,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, strided_cm_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -51888,13 +51888,13 @@
.k(k)
.cm_stride(5)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, qmin) {
+TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, qmin) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -51904,10 +51904,10 @@
.n(2)
.k(1)
.qmin(128)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, qmax) {
+TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, qmax) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -51917,10 +51917,10 @@
.n(2)
.k(1)
.qmax(128)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, strided_cm) {
+TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, strided_cm) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -51930,10 +51930,10 @@
.n(2)
.k(1)
.cm_stride(5)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, no_a_zero_point) {
+TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, no_a_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(3)
@@ -51944,11 +51944,11 @@
.n(2)
.k(k)
.a_zero_point(0)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, no_b_zero_point) {
+TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, no_b_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(3)
@@ -51959,11 +51959,11 @@
.n(2)
.k(k)
.b_zero_point(0)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, no_zero_point) {
+TEST(QU8_GEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, no_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(3)
@@ -51975,11 +51975,11 @@
.k(k)
.a_zero_point(0)
.b_zero_point(0)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, k_eq_1) {
+TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, k_eq_1) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -51988,10 +51988,10 @@
.m(4)
.n(2)
.k(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, strided_cn) {
+TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, strided_cn) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -52001,10 +52001,10 @@
.n(2)
.k(1)
.cn_stride(5)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, k_eq_1_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, k_eq_1_strided_a) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -52014,10 +52014,10 @@
.n(2)
.k(1)
.a_stride(3)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, k_eq_1_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
@@ -52029,12 +52029,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, k_eq_1_subtile_m) {
+TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 4; m++) {
GemmMicrokernelTester()
.mr(4)
@@ -52045,11 +52045,11 @@
.n(2)
.k(1)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, k_eq_1_subtile_n) {
+TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
.mr(4)
@@ -52060,11 +52060,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, k_gt_1) {
+TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(4)
@@ -52074,11 +52074,11 @@
.m(4)
.n(2)
.k(k)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, k_gt_1_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, k_gt_1_strided_a) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(4)
@@ -52089,11 +52089,11 @@
.n(2)
.k(k)
.a_stride(11)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, k_gt_1_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -52106,13 +52106,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, n_gt_2) {
+TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, n_gt_2) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -52123,12 +52123,12 @@
.m(4)
.n(2)
.k(k)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, n_gt_2_strided_cn) {
+TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, n_gt_2_strided_cn) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -52140,12 +52140,12 @@
.n(2)
.k(k)
.cn_stride(5)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, n_gt_2_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, n_gt_2_strided_a) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -52157,12 +52157,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, n_gt_2_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, n_gt_2_subtile) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
@@ -52175,13 +52175,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, n_div_2) {
+TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, n_div_2) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -52192,12 +52192,12 @@
.m(4)
.n(2)
.k(k)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, n_div_2_strided_cn) {
+TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, n_div_2_strided_cn) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -52209,12 +52209,12 @@
.n(n)
.k(k)
.cn_stride(5)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, n_div_2_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, n_div_2_strided_a) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -52226,12 +52226,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, n_div_2_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, n_div_2_subtile) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
@@ -52244,13 +52244,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, strided_cm_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -52264,13 +52264,13 @@
.k(k)
.cm_stride(5)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, qmin) {
+TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, qmin) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -52280,10 +52280,10 @@
.n(2)
.k(1)
.qmin(128)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, qmax) {
+TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, qmax) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -52293,10 +52293,10 @@
.n(2)
.k(1)
.qmax(128)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, strided_cm) {
+TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, strided_cm) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -52306,10 +52306,10 @@
.n(2)
.k(1)
.cm_stride(5)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, no_a_zero_point) {
+TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, no_a_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(4)
@@ -52320,11 +52320,11 @@
.n(2)
.k(k)
.a_zero_point(0)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, no_b_zero_point) {
+TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, no_b_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(4)
@@ -52335,11 +52335,11 @@
.n(2)
.k(k)
.b_zero_point(0)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, no_zero_point) {
+TEST(QU8_GEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, no_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(4)
@@ -52351,11 +52351,11 @@
.k(k)
.a_zero_point(0)
.b_zero_point(0)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, k_eq_1) {
+TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, k_eq_1) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -52364,10 +52364,10 @@
.m(1)
.n(4)
.k(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, strided_cn) {
+TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, strided_cn) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -52377,10 +52377,10 @@
.n(4)
.k(1)
.cn_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, k_eq_1_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, k_eq_1_strided_a) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -52390,10 +52390,10 @@
.n(4)
.k(1)
.a_stride(3)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, k_eq_1_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
@@ -52405,12 +52405,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, k_eq_1_subtile_m) {
+TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 1; m++) {
GemmMicrokernelTester()
.mr(1)
@@ -52421,11 +52421,11 @@
.n(4)
.k(1)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, k_eq_1_subtile_n) {
+TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
.mr(1)
@@ -52436,11 +52436,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, k_gt_1) {
+TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(1)
@@ -52450,11 +52450,11 @@
.m(1)
.n(4)
.k(k)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, k_gt_1_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, k_gt_1_strided_a) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(1)
@@ -52465,11 +52465,11 @@
.n(4)
.k(k)
.a_stride(11)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, k_gt_1_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -52482,13 +52482,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, n_gt_4) {
+TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, n_gt_4) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -52499,12 +52499,12 @@
.m(1)
.n(4)
.k(k)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, n_gt_4_strided_cn) {
+TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, n_gt_4_strided_cn) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -52516,12 +52516,12 @@
.n(4)
.k(k)
.cn_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, n_gt_4_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, n_gt_4_strided_a) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -52533,12 +52533,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, n_gt_4_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, n_gt_4_subtile) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
@@ -52551,13 +52551,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, n_div_4) {
+TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, n_div_4) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -52568,12 +52568,12 @@
.m(1)
.n(4)
.k(k)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, n_div_4_strided_cn) {
+TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, n_div_4_strided_cn) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -52585,12 +52585,12 @@
.n(n)
.k(k)
.cn_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, n_div_4_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, n_div_4_strided_a) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -52602,12 +52602,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, n_div_4_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, n_div_4_subtile) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
@@ -52620,13 +52620,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, strided_cm_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -52640,13 +52640,13 @@
.k(k)
.cm_stride(7)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, qmin) {
+TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, qmin) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -52656,10 +52656,10 @@
.n(4)
.k(1)
.qmin(128)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, qmax) {
+TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, qmax) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -52669,10 +52669,10 @@
.n(4)
.k(1)
.qmax(128)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, strided_cm) {
+TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, strided_cm) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -52682,10 +52682,10 @@
.n(4)
.k(1)
.cm_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, no_a_zero_point) {
+TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, no_a_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(1)
@@ -52696,11 +52696,11 @@
.n(4)
.k(k)
.a_zero_point(0)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, no_b_zero_point) {
+TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, no_b_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(1)
@@ -52711,11 +52711,11 @@
.n(4)
.k(k)
.b_zero_point(0)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, no_zero_point) {
+TEST(QU8_GEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, no_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(1)
@@ -52727,11 +52727,11 @@
.k(k)
.a_zero_point(0)
.b_zero_point(0)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, k_eq_1) {
+TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, k_eq_1) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -52740,10 +52740,10 @@
.m(2)
.n(4)
.k(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, strided_cn) {
+TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, strided_cn) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -52753,10 +52753,10 @@
.n(4)
.k(1)
.cn_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, k_eq_1_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, k_eq_1_strided_a) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -52766,10 +52766,10 @@
.n(4)
.k(1)
.a_stride(3)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, k_eq_1_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
@@ -52781,12 +52781,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, k_eq_1_subtile_m) {
+TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 2; m++) {
GemmMicrokernelTester()
.mr(2)
@@ -52797,11 +52797,11 @@
.n(4)
.k(1)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, k_eq_1_subtile_n) {
+TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
.mr(2)
@@ -52812,11 +52812,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, k_gt_1) {
+TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(2)
@@ -52826,11 +52826,11 @@
.m(2)
.n(4)
.k(k)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, k_gt_1_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, k_gt_1_strided_a) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(2)
@@ -52841,11 +52841,11 @@
.n(4)
.k(k)
.a_stride(11)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, k_gt_1_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -52858,13 +52858,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, n_gt_4) {
+TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, n_gt_4) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -52875,12 +52875,12 @@
.m(2)
.n(4)
.k(k)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, n_gt_4_strided_cn) {
+TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, n_gt_4_strided_cn) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -52892,12 +52892,12 @@
.n(4)
.k(k)
.cn_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, n_gt_4_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, n_gt_4_strided_a) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -52909,12 +52909,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, n_gt_4_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, n_gt_4_subtile) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
@@ -52927,13 +52927,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, n_div_4) {
+TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, n_div_4) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -52944,12 +52944,12 @@
.m(2)
.n(4)
.k(k)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, n_div_4_strided_cn) {
+TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, n_div_4_strided_cn) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -52961,12 +52961,12 @@
.n(n)
.k(k)
.cn_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, n_div_4_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, n_div_4_strided_a) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -52978,12 +52978,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, n_div_4_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, n_div_4_subtile) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
@@ -52996,13 +52996,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, strided_cm_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -53016,13 +53016,13 @@
.k(k)
.cm_stride(7)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, qmin) {
+TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, qmin) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -53032,10 +53032,10 @@
.n(4)
.k(1)
.qmin(128)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, qmax) {
+TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, qmax) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -53045,10 +53045,10 @@
.n(4)
.k(1)
.qmax(128)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, strided_cm) {
+TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, strided_cm) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -53058,10 +53058,10 @@
.n(4)
.k(1)
.cm_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, no_a_zero_point) {
+TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, no_a_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(2)
@@ -53072,11 +53072,11 @@
.n(4)
.k(k)
.a_zero_point(0)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, no_b_zero_point) {
+TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, no_b_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(2)
@@ -53087,11 +53087,11 @@
.n(4)
.k(k)
.b_zero_point(0)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, no_zero_point) {
+TEST(QU8_GEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, no_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(2)
@@ -53103,11 +53103,11 @@
.k(k)
.a_zero_point(0)
.b_zero_point(0)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, k_eq_1) {
+TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, k_eq_1) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -53116,10 +53116,10 @@
.m(3)
.n(4)
.k(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, strided_cn) {
+TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, strided_cn) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -53129,10 +53129,10 @@
.n(4)
.k(1)
.cn_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, k_eq_1_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, k_eq_1_strided_a) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -53142,10 +53142,10 @@
.n(4)
.k(1)
.a_stride(3)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, k_eq_1_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
@@ -53157,12 +53157,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, k_eq_1_subtile_m) {
+TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 3; m++) {
GemmMicrokernelTester()
.mr(3)
@@ -53173,11 +53173,11 @@
.n(4)
.k(1)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, k_eq_1_subtile_n) {
+TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
.mr(3)
@@ -53188,11 +53188,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, k_gt_1) {
+TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(3)
@@ -53202,11 +53202,11 @@
.m(3)
.n(4)
.k(k)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, k_gt_1_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, k_gt_1_strided_a) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(3)
@@ -53217,11 +53217,11 @@
.n(4)
.k(k)
.a_stride(11)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, k_gt_1_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -53234,13 +53234,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, n_gt_4) {
+TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, n_gt_4) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -53251,12 +53251,12 @@
.m(3)
.n(4)
.k(k)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, n_gt_4_strided_cn) {
+TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, n_gt_4_strided_cn) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -53268,12 +53268,12 @@
.n(4)
.k(k)
.cn_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, n_gt_4_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, n_gt_4_strided_a) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -53285,12 +53285,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, n_gt_4_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, n_gt_4_subtile) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
@@ -53303,13 +53303,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, n_div_4) {
+TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, n_div_4) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -53320,12 +53320,12 @@
.m(3)
.n(4)
.k(k)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, n_div_4_strided_cn) {
+TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, n_div_4_strided_cn) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -53337,12 +53337,12 @@
.n(n)
.k(k)
.cn_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, n_div_4_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, n_div_4_strided_a) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -53354,12 +53354,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, n_div_4_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, n_div_4_subtile) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
@@ -53372,13 +53372,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, strided_cm_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -53392,13 +53392,13 @@
.k(k)
.cm_stride(7)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, qmin) {
+TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, qmin) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -53408,10 +53408,10 @@
.n(4)
.k(1)
.qmin(128)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, qmax) {
+TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, qmax) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -53421,10 +53421,10 @@
.n(4)
.k(1)
.qmax(128)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, strided_cm) {
+TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, strided_cm) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -53434,10 +53434,10 @@
.n(4)
.k(1)
.cm_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, no_a_zero_point) {
+TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, no_a_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(3)
@@ -53448,11 +53448,11 @@
.n(4)
.k(k)
.a_zero_point(0)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, no_b_zero_point) {
+TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, no_b_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(3)
@@ -53463,11 +53463,11 @@
.n(4)
.k(k)
.b_zero_point(0)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, no_zero_point) {
+TEST(QU8_GEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, no_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(3)
@@ -53479,11 +53479,11 @@
.k(k)
.a_zero_point(0)
.b_zero_point(0)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, k_eq_1) {
+TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, k_eq_1) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -53492,10 +53492,10 @@
.m(4)
.n(4)
.k(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, strided_cn) {
+TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, strided_cn) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -53505,10 +53505,10 @@
.n(4)
.k(1)
.cn_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, k_eq_1_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, k_eq_1_strided_a) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -53518,10 +53518,10 @@
.n(4)
.k(1)
.a_stride(3)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, k_eq_1_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
@@ -53533,12 +53533,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, k_eq_1_subtile_m) {
+TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 4; m++) {
GemmMicrokernelTester()
.mr(4)
@@ -53549,11 +53549,11 @@
.n(4)
.k(1)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, k_eq_1_subtile_n) {
+TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
.mr(4)
@@ -53564,11 +53564,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, k_gt_1) {
+TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(4)
@@ -53578,11 +53578,11 @@
.m(4)
.n(4)
.k(k)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, k_gt_1_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, k_gt_1_strided_a) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(4)
@@ -53593,11 +53593,11 @@
.n(4)
.k(k)
.a_stride(11)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, k_gt_1_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -53610,13 +53610,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, n_gt_4) {
+TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, n_gt_4) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -53627,12 +53627,12 @@
.m(4)
.n(4)
.k(k)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, n_gt_4_strided_cn) {
+TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, n_gt_4_strided_cn) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -53644,12 +53644,12 @@
.n(4)
.k(k)
.cn_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, n_gt_4_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, n_gt_4_strided_a) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -53661,12 +53661,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, n_gt_4_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, n_gt_4_subtile) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
@@ -53679,13 +53679,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, n_div_4) {
+TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, n_div_4) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -53696,12 +53696,12 @@
.m(4)
.n(4)
.k(k)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, n_div_4_strided_cn) {
+TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, n_div_4_strided_cn) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -53713,12 +53713,12 @@
.n(n)
.k(k)
.cn_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, n_div_4_strided_a) {
+TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, n_div_4_strided_a) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -53730,12 +53730,12 @@
.n(n)
.k(k)
.a_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, n_div_4_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, n_div_4_subtile) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
@@ -53748,13 +53748,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, strided_cm_subtile) {
+TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -53768,13 +53768,13 @@
.k(k)
.cm_stride(7)
.iterations(1)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, qmin) {
+TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, qmin) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -53784,10 +53784,10 @@
.n(4)
.k(1)
.qmin(128)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, qmax) {
+TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, qmax) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -53797,10 +53797,10 @@
.n(4)
.k(1)
.qmax(128)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, strided_cm) {
+TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, strided_cm) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -53810,10 +53810,10 @@
.n(4)
.k(1)
.cm_stride(7)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, no_a_zero_point) {
+TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, no_a_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(4)
@@ -53824,11 +53824,11 @@
.n(4)
.k(k)
.a_zero_point(0)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, no_b_zero_point) {
+TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, no_b_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(4)
@@ -53839,11 +53839,11 @@
.n(4)
.k(k)
.b_zero_point(0)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, no_zero_point) {
+TEST(QU8_GEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, no_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(4)
@@ -53855,6 +53855,6 @@
.k(k)
.a_zero_point(0)
.b_zero_point(0)
- .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
\ No newline at end of file
diff --git a/test/qu8-gemm-minmax-fp32.yaml b/test/qu8-gemm-minmax-fp32.yaml
index fb12b98..7e52b9a 100644
--- a/test/qu8-gemm-minmax-fp32.yaml
+++ b/test/qu8-gemm-minmax-fp32.yaml
@@ -292,51 +292,51 @@
- name: xnn_qu8_gemm_minmax_fp32_ukernel_3x4c8__wasmsimd_mul32_ld128
init: xnn_init_qu8_conv_minmax_fp32_wasmsimd_params
k-block: 8
-- name: xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_lrint
- init: xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params
+- name: xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf
+ init: xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params
k-block: 1
-- name: xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_lrint
- init: xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params
+- name: xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_lrintf
+ init: xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params
k-block: 1
-- name: xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_lrint
- init: xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params
+- name: xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf
+ init: xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params
k-block: 1
-- name: xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_lrint
- init: xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params
+- name: xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf
+ init: xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params
k-block: 1
-- name: xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_lrint
- init: xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params
+- name: xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf
+ init: xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params
k-block: 1
-- name: xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_lrint
- init: xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params
+- name: xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_lrintf
+ init: xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params
k-block: 1
-- name: xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_lrint
- init: xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params
+- name: xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf
+ init: xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params
k-block: 1
-- name: xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_lrint
- init: xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params
+- name: xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf
+ init: xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params
k-block: 1
-- name: xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_magic
- init: xnn_init_qu8_conv_minmax_fp32_scalar_magic_params
+- name: xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic
+ init: xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params
k-block: 1
-- name: xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_magic
- init: xnn_init_qu8_conv_minmax_fp32_scalar_magic_params
+- name: xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_fmagic
+ init: xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params
k-block: 1
-- name: xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_magic
- init: xnn_init_qu8_conv_minmax_fp32_scalar_magic_params
+- name: xnn_qu8_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic
+ init: xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params
k-block: 1
-- name: xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_magic
- init: xnn_init_qu8_conv_minmax_fp32_scalar_magic_params
+- name: xnn_qu8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic
+ init: xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params
k-block: 1
-- name: xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_magic
- init: xnn_init_qu8_conv_minmax_fp32_scalar_magic_params
+- name: xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic
+ init: xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params
k-block: 1
-- name: xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_magic
- init: xnn_init_qu8_conv_minmax_fp32_scalar_magic_params
+- name: xnn_qu8_gemm_minmax_fp32_ukernel_2x4__scalar_fmagic
+ init: xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params
k-block: 1
-- name: xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_magic
- init: xnn_init_qu8_conv_minmax_fp32_scalar_magic_params
+- name: xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic
+ init: xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params
k-block: 1
-- name: xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_magic
- init: xnn_init_qu8_conv_minmax_fp32_scalar_magic_params
+- name: xnn_qu8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic
+ init: xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params
k-block: 1
diff --git a/test/qu8-igemm-minmax-fp32.cc b/test/qu8-igemm-minmax-fp32.cc
index 82648e5..3b5506c 100644
--- a/test/qu8-igemm-minmax-fp32.cc
+++ b/test/qu8-igemm-minmax-fp32.cc
@@ -48995,7 +48995,7 @@
#endif // XNN_ARCH_WASMSIMD || XNN_ARCH_WASMRELAXEDSIMD
-TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, k_eq_1) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, k_eq_1) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -49004,10 +49004,10 @@
.m(1)
.n(2)
.k(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, strided_cn) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, strided_cn) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -49017,10 +49017,10 @@
.n(2)
.k(1)
.cn_stride(5)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, k_eq_1_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
@@ -49032,12 +49032,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, k_eq_1_subtile_m) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 1; m++) {
GemmMicrokernelTester()
.mr(1)
@@ -49048,11 +49048,11 @@
.n(2)
.k(1)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, k_eq_1_subtile_n) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
.mr(1)
@@ -49063,11 +49063,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, k_gt_1) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(1)
@@ -49077,11 +49077,11 @@
.m(1)
.n(2)
.k(k)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, k_gt_1_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -49094,13 +49094,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, n_gt_2) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, n_gt_2) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -49111,12 +49111,12 @@
.m(1)
.n(2)
.k(k)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, n_gt_2_strided_cn) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, n_gt_2_strided_cn) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -49128,12 +49128,12 @@
.n(2)
.k(k)
.cn_stride(5)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, n_gt_2_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, n_gt_2_subtile) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
@@ -49146,13 +49146,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, n_div_2) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, n_div_2) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -49163,12 +49163,12 @@
.m(1)
.n(2)
.k(k)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, n_div_2_strided_cn) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, n_div_2_strided_cn) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -49180,12 +49180,12 @@
.n(n)
.k(k)
.cn_stride(5)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, n_div_2_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, n_div_2_subtile) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
@@ -49198,13 +49198,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, small_kernel) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, small_kernel) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(1)
@@ -49215,11 +49215,11 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, small_kernel_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, small_kernel_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -49233,13 +49233,13 @@
.k(k)
.ks(3)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, n_gt_2_small_kernel) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, n_gt_2_small_kernel) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -49251,12 +49251,12 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, n_div_2_small_kernel) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, n_div_2_small_kernel) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -49268,12 +49268,12 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, strided_cm_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -49287,13 +49287,13 @@
.k(k)
.cm_stride(5)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, a_offset) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, a_offset) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(1)
@@ -49305,11 +49305,11 @@
.k(k)
.ks(3)
.a_offset(7)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, zero) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, zero) {
for (uint32_t mz = 0; mz < 1; mz++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -49323,12 +49323,12 @@
.ks(3)
.a_offset(7)
.zero_index(mz)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, qmin) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, qmin) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -49338,10 +49338,10 @@
.n(2)
.k(1)
.qmin(128)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, qmax) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, qmax) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -49351,10 +49351,10 @@
.n(2)
.k(1)
.qmax(128)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, strided_cm) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, strided_cm) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -49364,10 +49364,10 @@
.n(2)
.k(1)
.cm_stride(5)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, no_a_zero_point) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, no_a_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(1)
@@ -49378,11 +49378,11 @@
.n(2)
.k(k)
.a_zero_point(0)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, no_b_zero_point) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, no_b_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(1)
@@ -49393,11 +49393,11 @@
.n(2)
.k(k)
.b_zero_point(0)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINT, no_zero_point) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_LRINTF, no_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(1)
@@ -49409,11 +49409,11 @@
.k(k)
.a_zero_point(0)
.b_zero_point(0)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, k_eq_1) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, k_eq_1) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -49422,10 +49422,10 @@
.m(2)
.n(2)
.k(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, strided_cn) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, strided_cn) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -49435,10 +49435,10 @@
.n(2)
.k(1)
.cn_stride(5)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, k_eq_1_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
@@ -49450,12 +49450,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, k_eq_1_subtile_m) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 2; m++) {
GemmMicrokernelTester()
.mr(2)
@@ -49466,11 +49466,11 @@
.n(2)
.k(1)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, k_eq_1_subtile_n) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
.mr(2)
@@ -49481,11 +49481,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, k_gt_1) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(2)
@@ -49495,11 +49495,11 @@
.m(2)
.n(2)
.k(k)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, k_gt_1_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -49512,13 +49512,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, n_gt_2) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, n_gt_2) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -49529,12 +49529,12 @@
.m(2)
.n(2)
.k(k)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, n_gt_2_strided_cn) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, n_gt_2_strided_cn) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -49546,12 +49546,12 @@
.n(2)
.k(k)
.cn_stride(5)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, n_gt_2_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, n_gt_2_subtile) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
@@ -49564,13 +49564,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, n_div_2) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, n_div_2) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -49581,12 +49581,12 @@
.m(2)
.n(2)
.k(k)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, n_div_2_strided_cn) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, n_div_2_strided_cn) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -49598,12 +49598,12 @@
.n(n)
.k(k)
.cn_stride(5)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, n_div_2_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, n_div_2_subtile) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
@@ -49616,13 +49616,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, small_kernel) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, small_kernel) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(2)
@@ -49633,11 +49633,11 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, small_kernel_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, small_kernel_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -49651,13 +49651,13 @@
.k(k)
.ks(3)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, n_gt_2_small_kernel) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, n_gt_2_small_kernel) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -49669,12 +49669,12 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, n_div_2_small_kernel) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, n_div_2_small_kernel) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -49686,12 +49686,12 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, strided_cm_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -49705,13 +49705,13 @@
.k(k)
.cm_stride(5)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, a_offset) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, a_offset) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(2)
@@ -49723,11 +49723,11 @@
.k(k)
.ks(3)
.a_offset(13)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, zero) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, zero) {
for (uint32_t mz = 0; mz < 2; mz++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -49741,12 +49741,12 @@
.ks(3)
.a_offset(13)
.zero_index(mz)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, qmin) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, qmin) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -49756,10 +49756,10 @@
.n(2)
.k(1)
.qmin(128)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, qmax) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, qmax) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -49769,10 +49769,10 @@
.n(2)
.k(1)
.qmax(128)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, strided_cm) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, strided_cm) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -49782,10 +49782,10 @@
.n(2)
.k(1)
.cm_stride(5)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, no_a_zero_point) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, no_a_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(2)
@@ -49796,11 +49796,11 @@
.n(2)
.k(k)
.a_zero_point(0)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, no_b_zero_point) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, no_b_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(2)
@@ -49811,11 +49811,11 @@
.n(2)
.k(k)
.b_zero_point(0)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINT, no_zero_point) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_LRINTF, no_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(2)
@@ -49827,11 +49827,11 @@
.k(k)
.a_zero_point(0)
.b_zero_point(0)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, k_eq_1) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, k_eq_1) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -49840,10 +49840,10 @@
.m(3)
.n(2)
.k(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, strided_cn) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, strided_cn) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -49853,10 +49853,10 @@
.n(2)
.k(1)
.cn_stride(5)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, k_eq_1_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
@@ -49868,12 +49868,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, k_eq_1_subtile_m) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 3; m++) {
GemmMicrokernelTester()
.mr(3)
@@ -49884,11 +49884,11 @@
.n(2)
.k(1)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, k_eq_1_subtile_n) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
.mr(3)
@@ -49899,11 +49899,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, k_gt_1) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(3)
@@ -49913,11 +49913,11 @@
.m(3)
.n(2)
.k(k)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, k_gt_1_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -49930,13 +49930,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, n_gt_2) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, n_gt_2) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -49947,12 +49947,12 @@
.m(3)
.n(2)
.k(k)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, n_gt_2_strided_cn) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, n_gt_2_strided_cn) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -49964,12 +49964,12 @@
.n(2)
.k(k)
.cn_stride(5)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, n_gt_2_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, n_gt_2_subtile) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
@@ -49982,13 +49982,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, n_div_2) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, n_div_2) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -49999,12 +49999,12 @@
.m(3)
.n(2)
.k(k)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, n_div_2_strided_cn) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, n_div_2_strided_cn) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -50016,12 +50016,12 @@
.n(n)
.k(k)
.cn_stride(5)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, n_div_2_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, n_div_2_subtile) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
@@ -50034,13 +50034,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, small_kernel) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, small_kernel) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(3)
@@ -50051,11 +50051,11 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, small_kernel_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, small_kernel_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -50069,13 +50069,13 @@
.k(k)
.ks(3)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, n_gt_2_small_kernel) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, n_gt_2_small_kernel) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -50087,12 +50087,12 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, n_div_2_small_kernel) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, n_div_2_small_kernel) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -50104,12 +50104,12 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, strided_cm_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -50123,13 +50123,13 @@
.k(k)
.cm_stride(5)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, a_offset) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, a_offset) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(3)
@@ -50141,11 +50141,11 @@
.k(k)
.ks(3)
.a_offset(17)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, zero) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, zero) {
for (uint32_t mz = 0; mz < 3; mz++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -50159,12 +50159,12 @@
.ks(3)
.a_offset(17)
.zero_index(mz)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, qmin) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, qmin) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -50174,10 +50174,10 @@
.n(2)
.k(1)
.qmin(128)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, qmax) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, qmax) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -50187,10 +50187,10 @@
.n(2)
.k(1)
.qmax(128)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, strided_cm) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, strided_cm) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -50200,10 +50200,10 @@
.n(2)
.k(1)
.cm_stride(5)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, no_a_zero_point) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, no_a_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(3)
@@ -50214,11 +50214,11 @@
.n(2)
.k(k)
.a_zero_point(0)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, no_b_zero_point) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, no_b_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(3)
@@ -50229,11 +50229,11 @@
.n(2)
.k(k)
.b_zero_point(0)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINT, no_zero_point) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_LRINTF, no_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(3)
@@ -50245,11 +50245,11 @@
.k(k)
.a_zero_point(0)
.b_zero_point(0)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, k_eq_1) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, k_eq_1) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -50258,10 +50258,10 @@
.m(4)
.n(2)
.k(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, strided_cn) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, strided_cn) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -50271,10 +50271,10 @@
.n(2)
.k(1)
.cn_stride(5)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, k_eq_1_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
@@ -50286,12 +50286,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, k_eq_1_subtile_m) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 4; m++) {
GemmMicrokernelTester()
.mr(4)
@@ -50302,11 +50302,11 @@
.n(2)
.k(1)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, k_eq_1_subtile_n) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
.mr(4)
@@ -50317,11 +50317,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, k_gt_1) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(4)
@@ -50331,11 +50331,11 @@
.m(4)
.n(2)
.k(k)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, k_gt_1_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -50348,13 +50348,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, n_gt_2) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, n_gt_2) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -50365,12 +50365,12 @@
.m(4)
.n(2)
.k(k)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, n_gt_2_strided_cn) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, n_gt_2_strided_cn) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -50382,12 +50382,12 @@
.n(2)
.k(k)
.cn_stride(5)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, n_gt_2_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, n_gt_2_subtile) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
@@ -50400,13 +50400,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, n_div_2) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, n_div_2) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -50417,12 +50417,12 @@
.m(4)
.n(2)
.k(k)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, n_div_2_strided_cn) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, n_div_2_strided_cn) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -50434,12 +50434,12 @@
.n(n)
.k(k)
.cn_stride(5)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, n_div_2_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, n_div_2_subtile) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
@@ -50452,13 +50452,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, small_kernel) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, small_kernel) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(4)
@@ -50469,11 +50469,11 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, small_kernel_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, small_kernel_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -50487,13 +50487,13 @@
.k(k)
.ks(3)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, n_gt_2_small_kernel) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, n_gt_2_small_kernel) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -50505,12 +50505,12 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, n_div_2_small_kernel) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, n_div_2_small_kernel) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -50522,12 +50522,12 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, strided_cm_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -50541,13 +50541,13 @@
.k(k)
.cm_stride(5)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, a_offset) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, a_offset) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(4)
@@ -50559,11 +50559,11 @@
.k(k)
.ks(3)
.a_offset(23)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, zero) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, zero) {
for (uint32_t mz = 0; mz < 4; mz++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -50577,12 +50577,12 @@
.ks(3)
.a_offset(23)
.zero_index(mz)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, qmin) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, qmin) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -50592,10 +50592,10 @@
.n(2)
.k(1)
.qmin(128)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, qmax) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, qmax) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -50605,10 +50605,10 @@
.n(2)
.k(1)
.qmax(128)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, strided_cm) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, strided_cm) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -50618,10 +50618,10 @@
.n(2)
.k(1)
.cm_stride(5)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, no_a_zero_point) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, no_a_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(4)
@@ -50632,11 +50632,11 @@
.n(2)
.k(k)
.a_zero_point(0)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, no_b_zero_point) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, no_b_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(4)
@@ -50647,11 +50647,11 @@
.n(2)
.k(k)
.b_zero_point(0)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINT, no_zero_point) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_LRINTF, no_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(4)
@@ -50663,11 +50663,11 @@
.k(k)
.a_zero_point(0)
.b_zero_point(0)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, k_eq_1) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, k_eq_1) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -50676,10 +50676,10 @@
.m(1)
.n(4)
.k(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, strided_cn) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, strided_cn) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -50689,10 +50689,10 @@
.n(4)
.k(1)
.cn_stride(7)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, k_eq_1_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
@@ -50704,12 +50704,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, k_eq_1_subtile_m) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 1; m++) {
GemmMicrokernelTester()
.mr(1)
@@ -50720,11 +50720,11 @@
.n(4)
.k(1)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, k_eq_1_subtile_n) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
.mr(1)
@@ -50735,11 +50735,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, k_gt_1) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(1)
@@ -50749,11 +50749,11 @@
.m(1)
.n(4)
.k(k)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, k_gt_1_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -50766,13 +50766,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, n_gt_4) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, n_gt_4) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -50783,12 +50783,12 @@
.m(1)
.n(4)
.k(k)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, n_gt_4_strided_cn) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, n_gt_4_strided_cn) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -50800,12 +50800,12 @@
.n(4)
.k(k)
.cn_stride(7)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, n_gt_4_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, n_gt_4_subtile) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
@@ -50818,13 +50818,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, n_div_4) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, n_div_4) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -50835,12 +50835,12 @@
.m(1)
.n(4)
.k(k)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, n_div_4_strided_cn) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, n_div_4_strided_cn) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -50852,12 +50852,12 @@
.n(n)
.k(k)
.cn_stride(7)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, n_div_4_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, n_div_4_subtile) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
@@ -50870,13 +50870,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, small_kernel) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, small_kernel) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(1)
@@ -50887,11 +50887,11 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, small_kernel_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, small_kernel_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -50905,13 +50905,13 @@
.k(k)
.ks(3)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, n_gt_4_small_kernel) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, n_gt_4_small_kernel) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -50923,12 +50923,12 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, n_div_4_small_kernel) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, n_div_4_small_kernel) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -50940,12 +50940,12 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, strided_cm_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -50959,13 +50959,13 @@
.k(k)
.cm_stride(7)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, a_offset) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, a_offset) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(1)
@@ -50977,11 +50977,11 @@
.k(k)
.ks(3)
.a_offset(7)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, zero) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, zero) {
for (uint32_t mz = 0; mz < 1; mz++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -50995,12 +50995,12 @@
.ks(3)
.a_offset(7)
.zero_index(mz)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, qmin) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, qmin) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -51010,10 +51010,10 @@
.n(4)
.k(1)
.qmin(128)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, qmax) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, qmax) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -51023,10 +51023,10 @@
.n(4)
.k(1)
.qmax(128)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, strided_cm) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, strided_cm) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -51036,10 +51036,10 @@
.n(4)
.k(1)
.cm_stride(7)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, no_a_zero_point) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, no_a_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(1)
@@ -51050,11 +51050,11 @@
.n(4)
.k(k)
.a_zero_point(0)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, no_b_zero_point) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, no_b_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(1)
@@ -51065,11 +51065,11 @@
.n(4)
.k(k)
.b_zero_point(0)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINT, no_zero_point) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_LRINTF, no_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(1)
@@ -51081,11 +51081,11 @@
.k(k)
.a_zero_point(0)
.b_zero_point(0)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, k_eq_1) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, k_eq_1) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -51094,10 +51094,10 @@
.m(2)
.n(4)
.k(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, strided_cn) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, strided_cn) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -51107,10 +51107,10 @@
.n(4)
.k(1)
.cn_stride(7)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, k_eq_1_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
@@ -51122,12 +51122,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, k_eq_1_subtile_m) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 2; m++) {
GemmMicrokernelTester()
.mr(2)
@@ -51138,11 +51138,11 @@
.n(4)
.k(1)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, k_eq_1_subtile_n) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
.mr(2)
@@ -51153,11 +51153,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, k_gt_1) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(2)
@@ -51167,11 +51167,11 @@
.m(2)
.n(4)
.k(k)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, k_gt_1_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -51184,13 +51184,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, n_gt_4) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, n_gt_4) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -51201,12 +51201,12 @@
.m(2)
.n(4)
.k(k)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, n_gt_4_strided_cn) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, n_gt_4_strided_cn) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -51218,12 +51218,12 @@
.n(4)
.k(k)
.cn_stride(7)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, n_gt_4_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, n_gt_4_subtile) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
@@ -51236,13 +51236,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, n_div_4) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, n_div_4) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -51253,12 +51253,12 @@
.m(2)
.n(4)
.k(k)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, n_div_4_strided_cn) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, n_div_4_strided_cn) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -51270,12 +51270,12 @@
.n(n)
.k(k)
.cn_stride(7)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, n_div_4_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, n_div_4_subtile) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
@@ -51288,13 +51288,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, small_kernel) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, small_kernel) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(2)
@@ -51305,11 +51305,11 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, small_kernel_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, small_kernel_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -51323,13 +51323,13 @@
.k(k)
.ks(3)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, n_gt_4_small_kernel) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, n_gt_4_small_kernel) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -51341,12 +51341,12 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, n_div_4_small_kernel) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, n_div_4_small_kernel) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -51358,12 +51358,12 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, strided_cm_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -51377,13 +51377,13 @@
.k(k)
.cm_stride(7)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, a_offset) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, a_offset) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(2)
@@ -51395,11 +51395,11 @@
.k(k)
.ks(3)
.a_offset(13)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, zero) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, zero) {
for (uint32_t mz = 0; mz < 2; mz++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -51413,12 +51413,12 @@
.ks(3)
.a_offset(13)
.zero_index(mz)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, qmin) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, qmin) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -51428,10 +51428,10 @@
.n(4)
.k(1)
.qmin(128)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, qmax) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, qmax) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -51441,10 +51441,10 @@
.n(4)
.k(1)
.qmax(128)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, strided_cm) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, strided_cm) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -51454,10 +51454,10 @@
.n(4)
.k(1)
.cm_stride(7)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, no_a_zero_point) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, no_a_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(2)
@@ -51468,11 +51468,11 @@
.n(4)
.k(k)
.a_zero_point(0)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, no_b_zero_point) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, no_b_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(2)
@@ -51483,11 +51483,11 @@
.n(4)
.k(k)
.b_zero_point(0)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINT, no_zero_point) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_LRINTF, no_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(2)
@@ -51499,11 +51499,11 @@
.k(k)
.a_zero_point(0)
.b_zero_point(0)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, k_eq_1) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, k_eq_1) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -51512,10 +51512,10 @@
.m(3)
.n(4)
.k(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, strided_cn) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, strided_cn) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -51525,10 +51525,10 @@
.n(4)
.k(1)
.cn_stride(7)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, k_eq_1_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
@@ -51540,12 +51540,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, k_eq_1_subtile_m) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 3; m++) {
GemmMicrokernelTester()
.mr(3)
@@ -51556,11 +51556,11 @@
.n(4)
.k(1)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, k_eq_1_subtile_n) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
.mr(3)
@@ -51571,11 +51571,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, k_gt_1) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(3)
@@ -51585,11 +51585,11 @@
.m(3)
.n(4)
.k(k)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, k_gt_1_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -51602,13 +51602,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, n_gt_4) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, n_gt_4) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -51619,12 +51619,12 @@
.m(3)
.n(4)
.k(k)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, n_gt_4_strided_cn) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, n_gt_4_strided_cn) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -51636,12 +51636,12 @@
.n(4)
.k(k)
.cn_stride(7)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, n_gt_4_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, n_gt_4_subtile) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
@@ -51654,13 +51654,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, n_div_4) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, n_div_4) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -51671,12 +51671,12 @@
.m(3)
.n(4)
.k(k)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, n_div_4_strided_cn) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, n_div_4_strided_cn) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -51688,12 +51688,12 @@
.n(n)
.k(k)
.cn_stride(7)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, n_div_4_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, n_div_4_subtile) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
@@ -51706,13 +51706,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, small_kernel) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, small_kernel) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(3)
@@ -51723,11 +51723,11 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, small_kernel_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, small_kernel_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -51741,13 +51741,13 @@
.k(k)
.ks(3)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, n_gt_4_small_kernel) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, n_gt_4_small_kernel) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -51759,12 +51759,12 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, n_div_4_small_kernel) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, n_div_4_small_kernel) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -51776,12 +51776,12 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, strided_cm_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -51795,13 +51795,13 @@
.k(k)
.cm_stride(7)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, a_offset) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, a_offset) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(3)
@@ -51813,11 +51813,11 @@
.k(k)
.ks(3)
.a_offset(17)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, zero) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, zero) {
for (uint32_t mz = 0; mz < 3; mz++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -51831,12 +51831,12 @@
.ks(3)
.a_offset(17)
.zero_index(mz)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, qmin) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, qmin) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -51846,10 +51846,10 @@
.n(4)
.k(1)
.qmin(128)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, qmax) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, qmax) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -51859,10 +51859,10 @@
.n(4)
.k(1)
.qmax(128)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, strided_cm) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, strided_cm) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -51872,10 +51872,10 @@
.n(4)
.k(1)
.cm_stride(7)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, no_a_zero_point) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, no_a_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(3)
@@ -51886,11 +51886,11 @@
.n(4)
.k(k)
.a_zero_point(0)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, no_b_zero_point) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, no_b_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(3)
@@ -51901,11 +51901,11 @@
.n(4)
.k(k)
.b_zero_point(0)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINT, no_zero_point) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_LRINTF, no_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(3)
@@ -51917,11 +51917,11 @@
.k(k)
.a_zero_point(0)
.b_zero_point(0)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, k_eq_1) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, k_eq_1) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -51930,10 +51930,10 @@
.m(4)
.n(4)
.k(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, strided_cn) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, strided_cn) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -51943,10 +51943,10 @@
.n(4)
.k(1)
.cn_stride(7)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, k_eq_1_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
@@ -51958,12 +51958,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, k_eq_1_subtile_m) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 4; m++) {
GemmMicrokernelTester()
.mr(4)
@@ -51974,11 +51974,11 @@
.n(4)
.k(1)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, k_eq_1_subtile_n) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
.mr(4)
@@ -51989,11 +51989,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, k_gt_1) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(4)
@@ -52003,11 +52003,11 @@
.m(4)
.n(4)
.k(k)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, k_gt_1_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -52020,13 +52020,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, n_gt_4) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, n_gt_4) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -52037,12 +52037,12 @@
.m(4)
.n(4)
.k(k)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, n_gt_4_strided_cn) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, n_gt_4_strided_cn) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -52054,12 +52054,12 @@
.n(4)
.k(k)
.cn_stride(7)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, n_gt_4_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, n_gt_4_subtile) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
@@ -52072,13 +52072,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, n_div_4) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, n_div_4) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -52089,12 +52089,12 @@
.m(4)
.n(4)
.k(k)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, n_div_4_strided_cn) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, n_div_4_strided_cn) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -52106,12 +52106,12 @@
.n(n)
.k(k)
.cn_stride(7)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, n_div_4_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, n_div_4_subtile) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
@@ -52124,13 +52124,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, small_kernel) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, small_kernel) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(4)
@@ -52141,11 +52141,11 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, small_kernel_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, small_kernel_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -52159,13 +52159,13 @@
.k(k)
.ks(3)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, n_gt_4_small_kernel) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, n_gt_4_small_kernel) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -52177,12 +52177,12 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, n_div_4_small_kernel) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, n_div_4_small_kernel) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -52194,12 +52194,12 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, strided_cm_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -52213,13 +52213,13 @@
.k(k)
.cm_stride(7)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, a_offset) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, a_offset) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(4)
@@ -52231,11 +52231,11 @@
.k(k)
.ks(3)
.a_offset(23)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, zero) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, zero) {
for (uint32_t mz = 0; mz < 4; mz++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -52249,12 +52249,12 @@
.ks(3)
.a_offset(23)
.zero_index(mz)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, qmin) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, qmin) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -52264,10 +52264,10 @@
.n(4)
.k(1)
.qmin(128)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, qmax) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, qmax) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -52277,10 +52277,10 @@
.n(4)
.k(1)
.qmax(128)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, strided_cm) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, strided_cm) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -52290,10 +52290,10 @@
.n(4)
.k(1)
.cm_stride(7)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, no_a_zero_point) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, no_a_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(4)
@@ -52304,11 +52304,11 @@
.n(4)
.k(k)
.a_zero_point(0)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, no_b_zero_point) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, no_b_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(4)
@@ -52319,11 +52319,11 @@
.n(4)
.k(k)
.b_zero_point(0)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINT, no_zero_point) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_LRINTF, no_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(4)
@@ -52335,11 +52335,11 @@
.k(k)
.a_zero_point(0)
.b_zero_point(0)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint, xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf, xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, k_eq_1) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, k_eq_1) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -52348,10 +52348,10 @@
.m(1)
.n(2)
.k(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, strided_cn) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, strided_cn) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -52361,10 +52361,10 @@
.n(2)
.k(1)
.cn_stride(5)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, k_eq_1_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
@@ -52376,12 +52376,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, k_eq_1_subtile_m) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 1; m++) {
GemmMicrokernelTester()
.mr(1)
@@ -52392,11 +52392,11 @@
.n(2)
.k(1)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, k_eq_1_subtile_n) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
.mr(1)
@@ -52407,11 +52407,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, k_gt_1) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(1)
@@ -52421,11 +52421,11 @@
.m(1)
.n(2)
.k(k)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, k_gt_1_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -52438,13 +52438,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, n_gt_2) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, n_gt_2) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -52455,12 +52455,12 @@
.m(1)
.n(2)
.k(k)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, n_gt_2_strided_cn) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, n_gt_2_strided_cn) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -52472,12 +52472,12 @@
.n(2)
.k(k)
.cn_stride(5)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, n_gt_2_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, n_gt_2_subtile) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
@@ -52490,13 +52490,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, n_div_2) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, n_div_2) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -52507,12 +52507,12 @@
.m(1)
.n(2)
.k(k)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, n_div_2_strided_cn) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, n_div_2_strided_cn) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -52524,12 +52524,12 @@
.n(n)
.k(k)
.cn_stride(5)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, n_div_2_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, n_div_2_subtile) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
@@ -52542,13 +52542,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, small_kernel) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, small_kernel) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(1)
@@ -52559,11 +52559,11 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, small_kernel_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, small_kernel_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -52577,13 +52577,13 @@
.k(k)
.ks(3)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, n_gt_2_small_kernel) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, n_gt_2_small_kernel) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -52595,12 +52595,12 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, n_div_2_small_kernel) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, n_div_2_small_kernel) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -52612,12 +52612,12 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, strided_cm_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -52631,13 +52631,13 @@
.k(k)
.cm_stride(5)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, a_offset) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, a_offset) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(1)
@@ -52649,11 +52649,11 @@
.k(k)
.ks(3)
.a_offset(7)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, zero) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, zero) {
for (uint32_t mz = 0; mz < 1; mz++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -52667,12 +52667,12 @@
.ks(3)
.a_offset(7)
.zero_index(mz)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, qmin) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, qmin) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -52682,10 +52682,10 @@
.n(2)
.k(1)
.qmin(128)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, qmax) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, qmax) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -52695,10 +52695,10 @@
.n(2)
.k(1)
.qmax(128)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, strided_cm) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, strided_cm) {
GemmMicrokernelTester()
.mr(1)
.nr(2)
@@ -52708,10 +52708,10 @@
.n(2)
.k(1)
.cm_stride(5)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, no_a_zero_point) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, no_a_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(1)
@@ -52722,11 +52722,11 @@
.n(2)
.k(k)
.a_zero_point(0)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, no_b_zero_point) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, no_b_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(1)
@@ -52737,11 +52737,11 @@
.n(2)
.k(k)
.b_zero_point(0)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_MAGIC, no_zero_point) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X2__SCALAR_FMAGIC, no_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(1)
@@ -52753,11 +52753,11 @@
.k(k)
.a_zero_point(0)
.b_zero_point(0)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, k_eq_1) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, k_eq_1) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -52766,10 +52766,10 @@
.m(2)
.n(2)
.k(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, strided_cn) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, strided_cn) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -52779,10 +52779,10 @@
.n(2)
.k(1)
.cn_stride(5)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, k_eq_1_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
@@ -52794,12 +52794,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, k_eq_1_subtile_m) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 2; m++) {
GemmMicrokernelTester()
.mr(2)
@@ -52810,11 +52810,11 @@
.n(2)
.k(1)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, k_eq_1_subtile_n) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
.mr(2)
@@ -52825,11 +52825,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, k_gt_1) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(2)
@@ -52839,11 +52839,11 @@
.m(2)
.n(2)
.k(k)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, k_gt_1_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -52856,13 +52856,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, n_gt_2) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, n_gt_2) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -52873,12 +52873,12 @@
.m(2)
.n(2)
.k(k)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, n_gt_2_strided_cn) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, n_gt_2_strided_cn) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -52890,12 +52890,12 @@
.n(2)
.k(k)
.cn_stride(5)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, n_gt_2_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, n_gt_2_subtile) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
@@ -52908,13 +52908,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, n_div_2) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, n_div_2) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -52925,12 +52925,12 @@
.m(2)
.n(2)
.k(k)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, n_div_2_strided_cn) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, n_div_2_strided_cn) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -52942,12 +52942,12 @@
.n(n)
.k(k)
.cn_stride(5)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, n_div_2_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, n_div_2_subtile) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
@@ -52960,13 +52960,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, small_kernel) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, small_kernel) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(2)
@@ -52977,11 +52977,11 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, small_kernel_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, small_kernel_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -52995,13 +52995,13 @@
.k(k)
.ks(3)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, n_gt_2_small_kernel) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, n_gt_2_small_kernel) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -53013,12 +53013,12 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, n_div_2_small_kernel) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, n_div_2_small_kernel) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -53030,12 +53030,12 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, strided_cm_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -53049,13 +53049,13 @@
.k(k)
.cm_stride(5)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, a_offset) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, a_offset) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(2)
@@ -53067,11 +53067,11 @@
.k(k)
.ks(3)
.a_offset(13)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, zero) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, zero) {
for (uint32_t mz = 0; mz < 2; mz++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -53085,12 +53085,12 @@
.ks(3)
.a_offset(13)
.zero_index(mz)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, qmin) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, qmin) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -53100,10 +53100,10 @@
.n(2)
.k(1)
.qmin(128)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, qmax) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, qmax) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -53113,10 +53113,10 @@
.n(2)
.k(1)
.qmax(128)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, strided_cm) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, strided_cm) {
GemmMicrokernelTester()
.mr(2)
.nr(2)
@@ -53126,10 +53126,10 @@
.n(2)
.k(1)
.cm_stride(5)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, no_a_zero_point) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, no_a_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(2)
@@ -53140,11 +53140,11 @@
.n(2)
.k(k)
.a_zero_point(0)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, no_b_zero_point) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, no_b_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(2)
@@ -53155,11 +53155,11 @@
.n(2)
.k(k)
.b_zero_point(0)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_MAGIC, no_zero_point) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X2__SCALAR_FMAGIC, no_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(2)
@@ -53171,11 +53171,11 @@
.k(k)
.a_zero_point(0)
.b_zero_point(0)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, k_eq_1) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, k_eq_1) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -53184,10 +53184,10 @@
.m(3)
.n(2)
.k(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, strided_cn) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, strided_cn) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -53197,10 +53197,10 @@
.n(2)
.k(1)
.cn_stride(5)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, k_eq_1_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
@@ -53212,12 +53212,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, k_eq_1_subtile_m) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 3; m++) {
GemmMicrokernelTester()
.mr(3)
@@ -53228,11 +53228,11 @@
.n(2)
.k(1)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, k_eq_1_subtile_n) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
.mr(3)
@@ -53243,11 +53243,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, k_gt_1) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(3)
@@ -53257,11 +53257,11 @@
.m(3)
.n(2)
.k(k)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, k_gt_1_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -53274,13 +53274,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, n_gt_2) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, n_gt_2) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -53291,12 +53291,12 @@
.m(3)
.n(2)
.k(k)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, n_gt_2_strided_cn) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, n_gt_2_strided_cn) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -53308,12 +53308,12 @@
.n(2)
.k(k)
.cn_stride(5)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, n_gt_2_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, n_gt_2_subtile) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
@@ -53326,13 +53326,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, n_div_2) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, n_div_2) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -53343,12 +53343,12 @@
.m(3)
.n(2)
.k(k)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, n_div_2_strided_cn) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, n_div_2_strided_cn) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -53360,12 +53360,12 @@
.n(n)
.k(k)
.cn_stride(5)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, n_div_2_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, n_div_2_subtile) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
@@ -53378,13 +53378,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, small_kernel) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, small_kernel) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(3)
@@ -53395,11 +53395,11 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, small_kernel_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, small_kernel_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -53413,13 +53413,13 @@
.k(k)
.ks(3)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, n_gt_2_small_kernel) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, n_gt_2_small_kernel) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -53431,12 +53431,12 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, n_div_2_small_kernel) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, n_div_2_small_kernel) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -53448,12 +53448,12 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, strided_cm_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -53467,13 +53467,13 @@
.k(k)
.cm_stride(5)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, a_offset) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, a_offset) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(3)
@@ -53485,11 +53485,11 @@
.k(k)
.ks(3)
.a_offset(17)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, zero) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, zero) {
for (uint32_t mz = 0; mz < 3; mz++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -53503,12 +53503,12 @@
.ks(3)
.a_offset(17)
.zero_index(mz)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, qmin) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, qmin) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -53518,10 +53518,10 @@
.n(2)
.k(1)
.qmin(128)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, qmax) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, qmax) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -53531,10 +53531,10 @@
.n(2)
.k(1)
.qmax(128)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, strided_cm) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, strided_cm) {
GemmMicrokernelTester()
.mr(3)
.nr(2)
@@ -53544,10 +53544,10 @@
.n(2)
.k(1)
.cm_stride(5)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, no_a_zero_point) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, no_a_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(3)
@@ -53558,11 +53558,11 @@
.n(2)
.k(k)
.a_zero_point(0)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, no_b_zero_point) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, no_b_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(3)
@@ -53573,11 +53573,11 @@
.n(2)
.k(k)
.b_zero_point(0)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_MAGIC, no_zero_point) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X2__SCALAR_FMAGIC, no_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(3)
@@ -53589,11 +53589,11 @@
.k(k)
.a_zero_point(0)
.b_zero_point(0)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, k_eq_1) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, k_eq_1) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -53602,10 +53602,10 @@
.m(4)
.n(2)
.k(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, strided_cn) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, strided_cn) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -53615,10 +53615,10 @@
.n(2)
.k(1)
.cn_stride(5)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, k_eq_1_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
@@ -53630,12 +53630,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, k_eq_1_subtile_m) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 4; m++) {
GemmMicrokernelTester()
.mr(4)
@@ -53646,11 +53646,11 @@
.n(2)
.k(1)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, k_eq_1_subtile_n) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 2; n++) {
GemmMicrokernelTester()
.mr(4)
@@ -53661,11 +53661,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, k_gt_1) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(4)
@@ -53675,11 +53675,11 @@
.m(4)
.n(2)
.k(k)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, k_gt_1_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -53692,13 +53692,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, n_gt_2) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, n_gt_2) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -53709,12 +53709,12 @@
.m(4)
.n(2)
.k(k)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, n_gt_2_strided_cn) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, n_gt_2_strided_cn) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -53726,12 +53726,12 @@
.n(2)
.k(k)
.cn_stride(5)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, n_gt_2_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, n_gt_2_subtile) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
@@ -53744,13 +53744,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, n_div_2) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, n_div_2) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -53761,12 +53761,12 @@
.m(4)
.n(2)
.k(k)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, n_div_2_strided_cn) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, n_div_2_strided_cn) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -53778,12 +53778,12 @@
.n(n)
.k(k)
.cn_stride(5)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, n_div_2_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, n_div_2_subtile) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
@@ -53796,13 +53796,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, small_kernel) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, small_kernel) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(4)
@@ -53813,11 +53813,11 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, small_kernel_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, small_kernel_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -53831,13 +53831,13 @@
.k(k)
.ks(3)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, n_gt_2_small_kernel) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, n_gt_2_small_kernel) {
for (uint32_t n = 3; n < 4; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -53849,12 +53849,12 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, n_div_2_small_kernel) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, n_div_2_small_kernel) {
for (uint32_t n = 4; n <= 6; n += 2) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -53866,12 +53866,12 @@
.n(2)
.k(k)
.ks(3)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, strided_cm_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 2; n++) {
@@ -53885,13 +53885,13 @@
.k(k)
.cm_stride(5)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, a_offset) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, a_offset) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(4)
@@ -53903,11 +53903,11 @@
.k(k)
.ks(3)
.a_offset(23)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, zero) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, zero) {
for (uint32_t mz = 0; mz < 4; mz++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -53921,12 +53921,12 @@
.ks(3)
.a_offset(23)
.zero_index(mz)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, qmin) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, qmin) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -53936,10 +53936,10 @@
.n(2)
.k(1)
.qmin(128)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, qmax) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, qmax) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -53949,10 +53949,10 @@
.n(2)
.k(1)
.qmax(128)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, strided_cm) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, strided_cm) {
GemmMicrokernelTester()
.mr(4)
.nr(2)
@@ -53962,10 +53962,10 @@
.n(2)
.k(1)
.cm_stride(5)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, no_a_zero_point) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, no_a_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(4)
@@ -53976,11 +53976,11 @@
.n(2)
.k(k)
.a_zero_point(0)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, no_b_zero_point) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, no_b_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(4)
@@ -53991,11 +53991,11 @@
.n(2)
.k(k)
.b_zero_point(0)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_MAGIC, no_zero_point) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X2__SCALAR_FMAGIC, no_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(4)
@@ -54007,11 +54007,11 @@
.k(k)
.a_zero_point(0)
.b_zero_point(0)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, k_eq_1) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, k_eq_1) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -54020,10 +54020,10 @@
.m(1)
.n(4)
.k(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, strided_cn) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, strided_cn) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -54033,10 +54033,10 @@
.n(4)
.k(1)
.cn_stride(7)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, k_eq_1_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
@@ -54048,12 +54048,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, k_eq_1_subtile_m) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 1; m++) {
GemmMicrokernelTester()
.mr(1)
@@ -54064,11 +54064,11 @@
.n(4)
.k(1)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, k_eq_1_subtile_n) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
.mr(1)
@@ -54079,11 +54079,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, k_gt_1) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(1)
@@ -54093,11 +54093,11 @@
.m(1)
.n(4)
.k(k)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, k_gt_1_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -54110,13 +54110,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, n_gt_4) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, n_gt_4) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -54127,12 +54127,12 @@
.m(1)
.n(4)
.k(k)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, n_gt_4_strided_cn) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, n_gt_4_strided_cn) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -54144,12 +54144,12 @@
.n(4)
.k(k)
.cn_stride(7)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, n_gt_4_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, n_gt_4_subtile) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
@@ -54162,13 +54162,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, n_div_4) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, n_div_4) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -54179,12 +54179,12 @@
.m(1)
.n(4)
.k(k)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, n_div_4_strided_cn) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, n_div_4_strided_cn) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -54196,12 +54196,12 @@
.n(n)
.k(k)
.cn_stride(7)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, n_div_4_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, n_div_4_subtile) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
@@ -54214,13 +54214,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, small_kernel) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, small_kernel) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(1)
@@ -54231,11 +54231,11 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, small_kernel_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, small_kernel_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -54249,13 +54249,13 @@
.k(k)
.ks(3)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, n_gt_4_small_kernel) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, n_gt_4_small_kernel) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -54267,12 +54267,12 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, n_div_4_small_kernel) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, n_div_4_small_kernel) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -54284,12 +54284,12 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, strided_cm_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 1; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -54303,13 +54303,13 @@
.k(k)
.cm_stride(7)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, a_offset) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, a_offset) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(1)
@@ -54321,11 +54321,11 @@
.k(k)
.ks(3)
.a_offset(7)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, zero) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, zero) {
for (uint32_t mz = 0; mz < 1; mz++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -54339,12 +54339,12 @@
.ks(3)
.a_offset(7)
.zero_index(mz)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, qmin) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, qmin) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -54354,10 +54354,10 @@
.n(4)
.k(1)
.qmin(128)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, qmax) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, qmax) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -54367,10 +54367,10 @@
.n(4)
.k(1)
.qmax(128)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, strided_cm) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, strided_cm) {
GemmMicrokernelTester()
.mr(1)
.nr(4)
@@ -54380,10 +54380,10 @@
.n(4)
.k(1)
.cm_stride(7)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, no_a_zero_point) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, no_a_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(1)
@@ -54394,11 +54394,11 @@
.n(4)
.k(k)
.a_zero_point(0)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, no_b_zero_point) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, no_b_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(1)
@@ -54409,11 +54409,11 @@
.n(4)
.k(k)
.b_zero_point(0)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_MAGIC, no_zero_point) {
+TEST(QU8_IGEMM_MINMAX_FP32_1X4__SCALAR_FMAGIC, no_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(1)
@@ -54425,11 +54425,11 @@
.k(k)
.a_zero_point(0)
.b_zero_point(0)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, k_eq_1) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, k_eq_1) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -54438,10 +54438,10 @@
.m(2)
.n(4)
.k(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, strided_cn) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, strided_cn) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -54451,10 +54451,10 @@
.n(4)
.k(1)
.cn_stride(7)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, k_eq_1_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
@@ -54466,12 +54466,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, k_eq_1_subtile_m) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 2; m++) {
GemmMicrokernelTester()
.mr(2)
@@ -54482,11 +54482,11 @@
.n(4)
.k(1)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, k_eq_1_subtile_n) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
.mr(2)
@@ -54497,11 +54497,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, k_gt_1) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(2)
@@ -54511,11 +54511,11 @@
.m(2)
.n(4)
.k(k)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, k_gt_1_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -54528,13 +54528,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, n_gt_4) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, n_gt_4) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -54545,12 +54545,12 @@
.m(2)
.n(4)
.k(k)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, n_gt_4_strided_cn) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, n_gt_4_strided_cn) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -54562,12 +54562,12 @@
.n(4)
.k(k)
.cn_stride(7)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, n_gt_4_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, n_gt_4_subtile) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
@@ -54580,13 +54580,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, n_div_4) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, n_div_4) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -54597,12 +54597,12 @@
.m(2)
.n(4)
.k(k)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, n_div_4_strided_cn) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, n_div_4_strided_cn) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -54614,12 +54614,12 @@
.n(n)
.k(k)
.cn_stride(7)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, n_div_4_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, n_div_4_subtile) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
@@ -54632,13 +54632,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, small_kernel) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, small_kernel) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(2)
@@ -54649,11 +54649,11 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, small_kernel_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, small_kernel_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -54667,13 +54667,13 @@
.k(k)
.ks(3)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, n_gt_4_small_kernel) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, n_gt_4_small_kernel) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -54685,12 +54685,12 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, n_div_4_small_kernel) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, n_div_4_small_kernel) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -54702,12 +54702,12 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, strided_cm_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 2; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -54721,13 +54721,13 @@
.k(k)
.cm_stride(7)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, a_offset) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, a_offset) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(2)
@@ -54739,11 +54739,11 @@
.k(k)
.ks(3)
.a_offset(13)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, zero) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, zero) {
for (uint32_t mz = 0; mz < 2; mz++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -54757,12 +54757,12 @@
.ks(3)
.a_offset(13)
.zero_index(mz)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, qmin) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, qmin) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -54772,10 +54772,10 @@
.n(4)
.k(1)
.qmin(128)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, qmax) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, qmax) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -54785,10 +54785,10 @@
.n(4)
.k(1)
.qmax(128)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, strided_cm) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, strided_cm) {
GemmMicrokernelTester()
.mr(2)
.nr(4)
@@ -54798,10 +54798,10 @@
.n(4)
.k(1)
.cm_stride(7)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, no_a_zero_point) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, no_a_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(2)
@@ -54812,11 +54812,11 @@
.n(4)
.k(k)
.a_zero_point(0)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, no_b_zero_point) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, no_b_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(2)
@@ -54827,11 +54827,11 @@
.n(4)
.k(k)
.b_zero_point(0)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_MAGIC, no_zero_point) {
+TEST(QU8_IGEMM_MINMAX_FP32_2X4__SCALAR_FMAGIC, no_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(2)
@@ -54843,11 +54843,11 @@
.k(k)
.a_zero_point(0)
.b_zero_point(0)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, k_eq_1) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, k_eq_1) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -54856,10 +54856,10 @@
.m(3)
.n(4)
.k(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, strided_cn) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, strided_cn) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -54869,10 +54869,10 @@
.n(4)
.k(1)
.cn_stride(7)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, k_eq_1_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
@@ -54884,12 +54884,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, k_eq_1_subtile_m) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 3; m++) {
GemmMicrokernelTester()
.mr(3)
@@ -54900,11 +54900,11 @@
.n(4)
.k(1)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, k_eq_1_subtile_n) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
.mr(3)
@@ -54915,11 +54915,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, k_gt_1) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(3)
@@ -54929,11 +54929,11 @@
.m(3)
.n(4)
.k(k)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, k_gt_1_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -54946,13 +54946,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, n_gt_4) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, n_gt_4) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -54963,12 +54963,12 @@
.m(3)
.n(4)
.k(k)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, n_gt_4_strided_cn) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, n_gt_4_strided_cn) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -54980,12 +54980,12 @@
.n(4)
.k(k)
.cn_stride(7)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, n_gt_4_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, n_gt_4_subtile) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
@@ -54998,13 +54998,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, n_div_4) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, n_div_4) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -55015,12 +55015,12 @@
.m(3)
.n(4)
.k(k)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, n_div_4_strided_cn) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, n_div_4_strided_cn) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -55032,12 +55032,12 @@
.n(n)
.k(k)
.cn_stride(7)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, n_div_4_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, n_div_4_subtile) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
@@ -55050,13 +55050,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, small_kernel) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, small_kernel) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(3)
@@ -55067,11 +55067,11 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, small_kernel_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, small_kernel_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -55085,13 +55085,13 @@
.k(k)
.ks(3)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, n_gt_4_small_kernel) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, n_gt_4_small_kernel) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -55103,12 +55103,12 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, n_div_4_small_kernel) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, n_div_4_small_kernel) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -55120,12 +55120,12 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, strided_cm_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 3; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -55139,13 +55139,13 @@
.k(k)
.cm_stride(7)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, a_offset) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, a_offset) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(3)
@@ -55157,11 +55157,11 @@
.k(k)
.ks(3)
.a_offset(17)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, zero) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, zero) {
for (uint32_t mz = 0; mz < 3; mz++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -55175,12 +55175,12 @@
.ks(3)
.a_offset(17)
.zero_index(mz)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, qmin) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, qmin) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -55190,10 +55190,10 @@
.n(4)
.k(1)
.qmin(128)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, qmax) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, qmax) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -55203,10 +55203,10 @@
.n(4)
.k(1)
.qmax(128)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, strided_cm) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, strided_cm) {
GemmMicrokernelTester()
.mr(3)
.nr(4)
@@ -55216,10 +55216,10 @@
.n(4)
.k(1)
.cm_stride(7)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, no_a_zero_point) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, no_a_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(3)
@@ -55230,11 +55230,11 @@
.n(4)
.k(k)
.a_zero_point(0)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, no_b_zero_point) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, no_b_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(3)
@@ -55245,11 +55245,11 @@
.n(4)
.k(k)
.b_zero_point(0)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_MAGIC, no_zero_point) {
+TEST(QU8_IGEMM_MINMAX_FP32_3X4__SCALAR_FMAGIC, no_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(3)
@@ -55261,11 +55261,11 @@
.k(k)
.a_zero_point(0)
.b_zero_point(0)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, k_eq_1) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, k_eq_1) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -55274,10 +55274,10 @@
.m(4)
.n(4)
.k(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, strided_cn) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, strided_cn) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -55287,10 +55287,10 @@
.n(4)
.k(1)
.cn_stride(7)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, k_eq_1_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, k_eq_1_subtile) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
@@ -55302,12 +55302,12 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, k_eq_1_subtile_m) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, k_eq_1_subtile_m) {
for (uint32_t m = 1; m <= 4; m++) {
GemmMicrokernelTester()
.mr(4)
@@ -55318,11 +55318,11 @@
.n(4)
.k(1)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, k_eq_1_subtile_n) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, k_eq_1_subtile_n) {
for (uint32_t n = 1; n <= 4; n++) {
GemmMicrokernelTester()
.mr(4)
@@ -55333,11 +55333,11 @@
.n(n)
.k(1)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, k_gt_1) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, k_gt_1) {
for (size_t k = 2; k < 10; k++) {
GemmMicrokernelTester()
.mr(4)
@@ -55347,11 +55347,11 @@
.m(4)
.n(4)
.k(k)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, k_gt_1_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, k_gt_1_subtile) {
for (size_t k = 2; k < 10; k++) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -55364,13 +55364,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, n_gt_4) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, n_gt_4) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -55381,12 +55381,12 @@
.m(4)
.n(4)
.k(k)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, n_gt_4_strided_cn) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, n_gt_4_strided_cn) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -55398,12 +55398,12 @@
.n(4)
.k(k)
.cn_stride(7)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, n_gt_4_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, n_gt_4_subtile) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
@@ -55416,13 +55416,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, n_div_4) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, n_div_4) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -55433,12 +55433,12 @@
.m(4)
.n(4)
.k(k)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, n_div_4_strided_cn) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, n_div_4_strided_cn) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -55450,12 +55450,12 @@
.n(n)
.k(k)
.cn_stride(7)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, n_div_4_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, n_div_4_subtile) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
@@ -55468,13 +55468,13 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, small_kernel) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, small_kernel) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(4)
@@ -55485,11 +55485,11 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, small_kernel_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, small_kernel_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -55503,13 +55503,13 @@
.k(k)
.ks(3)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, n_gt_4_small_kernel) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, n_gt_4_small_kernel) {
for (uint32_t n = 5; n < 8; n++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -55521,12 +55521,12 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, n_div_4_small_kernel) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, n_div_4_small_kernel) {
for (uint32_t n = 8; n <= 12; n += 4) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -55538,12 +55538,12 @@
.n(4)
.k(k)
.ks(3)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, strided_cm_subtile) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, strided_cm_subtile) {
for (size_t k = 1; k <= 5; k += 2) {
for (uint32_t m = 1; m <= 4; m++) {
for (uint32_t n = 1; n <= 4; n++) {
@@ -55557,13 +55557,13 @@
.k(k)
.cm_stride(7)
.iterations(1)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, a_offset) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, a_offset) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(4)
@@ -55575,11 +55575,11 @@
.k(k)
.ks(3)
.a_offset(23)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, zero) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, zero) {
for (uint32_t mz = 0; mz < 4; mz++) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
@@ -55593,12 +55593,12 @@
.ks(3)
.a_offset(23)
.zero_index(mz)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, qmin) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, qmin) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -55608,10 +55608,10 @@
.n(4)
.k(1)
.qmin(128)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, qmax) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, qmax) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -55621,10 +55621,10 @@
.n(4)
.k(1)
.qmax(128)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, strided_cm) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, strided_cm) {
GemmMicrokernelTester()
.mr(4)
.nr(4)
@@ -55634,10 +55634,10 @@
.n(4)
.k(1)
.cm_stride(7)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, no_a_zero_point) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, no_a_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(4)
@@ -55648,11 +55648,11 @@
.n(4)
.k(k)
.a_zero_point(0)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, no_b_zero_point) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, no_b_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(4)
@@ -55663,11 +55663,11 @@
.n(4)
.k(k)
.b_zero_point(0)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
-TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_MAGIC, no_zero_point) {
+TEST(QU8_IGEMM_MINMAX_FP32_4X4__SCALAR_FMAGIC, no_zero_point) {
for (size_t k = 1; k <= 5; k += 2) {
GemmMicrokernelTester()
.mr(4)
@@ -55679,6 +55679,6 @@
.k(k)
.a_zero_point(0)
.b_zero_point(0)
- .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_magic, xnn_init_qu8_conv_minmax_fp32_scalar_magic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ .Test(xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic, xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
}
}
\ No newline at end of file
diff --git a/test/qu8-igemm-minmax-fp32.yaml b/test/qu8-igemm-minmax-fp32.yaml
index 9c419a4..e38e345 100644
--- a/test/qu8-igemm-minmax-fp32.yaml
+++ b/test/qu8-igemm-minmax-fp32.yaml
@@ -292,51 +292,51 @@
- name: xnn_qu8_igemm_minmax_fp32_ukernel_3x4c8__wasmsimd_mul32_ld128
init: xnn_init_qu8_conv_minmax_fp32_wasmsimd_params
k-block: 8
-- name: xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_lrint
- init: xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params
+- name: xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf
+ init: xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params
k-block: 1
-- name: xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_lrint
- init: xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params
+- name: xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf
+ init: xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params
k-block: 1
-- name: xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_lrint
- init: xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params
+- name: xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf
+ init: xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params
k-block: 1
-- name: xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_lrint
- init: xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params
+- name: xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf
+ init: xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params
k-block: 1
-- name: xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_lrint
- init: xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params
+- name: xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf
+ init: xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params
k-block: 1
-- name: xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_lrint
- init: xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params
+- name: xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf
+ init: xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params
k-block: 1
-- name: xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_lrint
- init: xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params
+- name: xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf
+ init: xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params
k-block: 1
-- name: xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_lrint
- init: xnn_init_qu8_conv_minmax_fp32_scalar_lrint_params
+- name: xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf
+ init: xnn_init_qu8_conv_minmax_fp32_scalar_lrintf_params
k-block: 1
-- name: xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_magic
- init: xnn_init_qu8_conv_minmax_fp32_scalar_magic_params
+- name: xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic
+ init: xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params
k-block: 1
-- name: xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_magic
- init: xnn_init_qu8_conv_minmax_fp32_scalar_magic_params
+- name: xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic
+ init: xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params
k-block: 1
-- name: xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_magic
- init: xnn_init_qu8_conv_minmax_fp32_scalar_magic_params
+- name: xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic
+ init: xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params
k-block: 1
-- name: xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_magic
- init: xnn_init_qu8_conv_minmax_fp32_scalar_magic_params
+- name: xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic
+ init: xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params
k-block: 1
-- name: xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_magic
- init: xnn_init_qu8_conv_minmax_fp32_scalar_magic_params
+- name: xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic
+ init: xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params
k-block: 1
-- name: xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_magic
- init: xnn_init_qu8_conv_minmax_fp32_scalar_magic_params
+- name: xnn_qu8_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic
+ init: xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params
k-block: 1
-- name: xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_magic
- init: xnn_init_qu8_conv_minmax_fp32_scalar_magic_params
+- name: xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic
+ init: xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params
k-block: 1
-- name: xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_magic
- init: xnn_init_qu8_conv_minmax_fp32_scalar_magic_params
+- name: xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic
+ init: xnn_init_qu8_conv_minmax_fp32_scalar_fmagic_params
k-block: 1
diff --git a/test/qu8-requantization.cc b/test/qu8-requantization.cc
index eb5e50e..dd9aa95 100644
--- a/test/qu8-requantization.cc
+++ b/test/qu8-requantization.cc
@@ -284,12 +284,12 @@
* FP32-based scalar implementation using magic trick for FP32->INT32 conversion.
*/
-TEST(QU8_FP32__SCALAR_MAGIC, random_cases) {
+TEST(QU8_FP32__SCALAR_FMAGIC, random_cases) {
RequantizationTester()
.qmin(std::numeric_limits<uint8_t>::min())
.qmax(std::numeric_limits<uint8_t>::max())
.iterations(1000)
- .TestRandomCasesApproximate(xnn_qu8_requantize_fp32__scalar_magic);
+ .TestRandomCasesApproximate(xnn_qu8_requantize_fp32__scalar_fmagic);
}