Enable 2x16 QU8 NEON mlal-lane microkernels on AArch32

- On AArch32, the 2x16 tile is 1.5x faster than 4x8 with intrinsics (see the sketch below).
- Only the non-prefetch versions are enabled for now.
- Remove the 4x8 and 1x8 variants from the PROD builds and sort the build file lists.
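
A minimal sketch of the per-architecture tile selection this enables. The struct
and function names below are illustrative only, and the microkernel symbols are
stand-ins named after the generated sources
(src/qu8-gemm/gen/2x16-minmax-rndnu-neon-mlal-lane.c etc.); this is not
XNNPACK's actual init code.

    /* Sketch only: hypothetical selection of the QU8 GEMM tile per architecture. */
    #include <stddef.h>
    #include <stdio.h>

    /* Stand-ins for the generated microkernels; the real entry points are
     * defined in src/qu8-gemm/gen/*-minmax-rndnu-neon-mlal-lane.c. */
    void qu8_gemm_2x16_neon_mlal_lane(void) { /* ... */ }
    void qu8_gemm_4x16_neon_mlal_lane(void) { /* ... */ }

    struct qu8_gemm_tile {
      void (*ukernel)(void);  /* microkernel entry point */
      size_t mr;              /* output rows computed per call */
      size_t nr;              /* output columns computed per call */
    };

    static struct qu8_gemm_tile select_qu8_gemm_tile(void) {
    #if defined(__aarch64__)
      /* AArch64 has 32 NEON registers, so a wider 4x16 tile can be used. */
      return (struct qu8_gemm_tile){ qu8_gemm_4x16_neon_mlal_lane, 4, 16 };
    #else
      /* AArch32: the 2x16 tile measured ~1.5x faster than 4x8 with intrinsics. */
      return (struct qu8_gemm_tile){ qu8_gemm_2x16_neon_mlal_lane, 2, 16 };
    #endif
    }

    int main(void) {
      const struct qu8_gemm_tile tile = select_qu8_gemm_tile();
      printf("selected QU8 GEMM tile: %zux%zu\n", tile.mr, tile.nr);
      tile.ukernel();
      return 0;
    }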

PiperOrigin-RevId: 420815377
diff --git a/BUILD.bazel b/BUILD.bazel
index ef11e04..8aaeeda 100644
--- a/BUILD.bazel
+++ b/BUILD.bazel
@@ -1244,22 +1244,22 @@
     "src/x32-packx/x2-scalar.c",
     "src/x32-packx/x3-scalar.c",
     "src/x32-packx/x4-scalar.c",
-    "src/x32-transpose/gen/1x2-scalar-int.c",
-    "src/x32-transpose/gen/1x4-scalar-int.c",
-    "src/x32-transpose/gen/2x1-scalar-int.c",
-    "src/x32-transpose/gen/2x2-scalar-int.c",
-    "src/x32-transpose/gen/2x4-scalar-int.c",
-    "src/x32-transpose/gen/4x1-scalar-int.c",
-    "src/x32-transpose/gen/4x2-scalar-int.c",
-    "src/x32-transpose/gen/4x4-scalar-int.c",
     "src/x32-transpose/gen/1x2-scalar-float.c",
+    "src/x32-transpose/gen/1x2-scalar-int.c",
     "src/x32-transpose/gen/1x4-scalar-float.c",
+    "src/x32-transpose/gen/1x4-scalar-int.c",
     "src/x32-transpose/gen/2x1-scalar-float.c",
+    "src/x32-transpose/gen/2x1-scalar-int.c",
     "src/x32-transpose/gen/2x2-scalar-float.c",
+    "src/x32-transpose/gen/2x2-scalar-int.c",
     "src/x32-transpose/gen/2x4-scalar-float.c",
+    "src/x32-transpose/gen/2x4-scalar-int.c",
     "src/x32-transpose/gen/4x1-scalar-float.c",
+    "src/x32-transpose/gen/4x1-scalar-int.c",
     "src/x32-transpose/gen/4x2-scalar-float.c",
+    "src/x32-transpose/gen/4x2-scalar-int.c",
     "src/x32-transpose/gen/4x4-scalar-float.c",
+    "src/x32-transpose/gen/4x4-scalar-int.c",
     "src/x32-unpool/scalar.c",
     "src/x32-zip/x2-scalar.c",
     "src/x32-zip/x3-scalar.c",
@@ -2604,15 +2604,11 @@
     "src/qu8-f32-vcvt/gen/vcvt-neon-x32.c",
     "src/qu8-gavgpool/7p7x-minmax-neon-c8.c",
     "src/qu8-gavgpool/7x-minmax-neon-c8.c",
-    "src/qu8-gemm/gen/1x8-minmax-rndnu-neon-mlal-lane.c",
     "src/qu8-gemm/gen/1x16-minmax-rndnu-neon-mlal-lane.c",
     "src/qu8-gemm/gen/2x16-minmax-rndnu-neon-mlal-lane.c",
-    "src/qu8-gemm/gen/4x8-minmax-rndnu-neon-mlal-lane.c",
     "src/qu8-gemm/gen/4x16-minmax-rndnu-neon-mlal-lane.c",
-    "src/qu8-igemm/gen/1x8-minmax-rndnu-neon-mlal-lane.c",
     "src/qu8-igemm/gen/1x16-minmax-rndnu-neon-mlal-lane.c",
     "src/qu8-igemm/gen/2x16-minmax-rndnu-neon-mlal-lane.c",
-    "src/qu8-igemm/gen/4x8-minmax-rndnu-neon-mlal-lane.c",
     "src/qu8-igemm/gen/4x16-minmax-rndnu-neon-mlal-lane.c",
     "src/qu8-vadd/gen/minmax-neon-ld64-x16.c",
     "src/qu8-vadd/gen/minmax-neon-ld64-x32.c",
@@ -5844,12 +5840,12 @@
 ]
 
 ALL_F16C_MICROKERNEL_SRCS = [
+    "src/f16-f32-vcvt/gen/vcvt-f16c-x8.c",
+    "src/f16-f32-vcvt/gen/vcvt-f16c-x16.c",
     "src/f16-vclamp/gen/vclamp-f16c-x8.c",
     "src/f16-vclamp/gen/vclamp-f16c-x16.c",
     "src/f16-vhswish/gen/vhswish-f16c-x8.c",
     "src/f16-vhswish/gen/vhswish-f16c-x16.c",
-    "src/f16-f32-vcvt/gen/vcvt-f16c-x8.c",
-    "src/f16-f32-vcvt/gen/vcvt-f16c-x16.c",
     "src/f32-f16-vcvt/gen/vcvt-f16c-x8.c",
     "src/f32-f16-vcvt/gen/vcvt-f16c-x16.c",
     "src/math/cvt-f16-f32-f16c.c",