Avoid x18 register

x18 is the platform register on AArch64 and is reserved by some
operating systems (e.g., Windows and Apple platforms), so portable
kernels must not clobber it.

Instead of loading cn_stride from the stack at function entry, defer
the load until the min/max output clamping and place it in x0, which
is free at that point: x0 holds mr on entry, and mr is dead once the
A and C pointers have been clamped. x14, which previously held
cn_stride, takes over x18's role as the c3 pointer.
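Condensed from the diff below (non-INC path, surrounding code
elided), the essence of the change:

    # Before: cn_stride loaded eagerly at entry; x18 held c3.
    LDP x14, x8, [sp]          # x14 = cn_stride, x8 = params
    ...
    ADD x18, x17, x7           # c3 = c2 + cm_stride (clobbers x18)

    # After: x14 takes over as c3; cn_stride is loaded into x0
    # at the clamping stage, once mr (held in x0) is dead.
    LDR x8, [sp, 8]            # x8 = params
    ...
    ADD x14, x17, x7           # c3 = c2 + cm_stride
    ...
    LDR x0, [sp, 0]            # x0 = cn_stride (x0 is free here)
    ST1 {v20.16b}, [x6], x0    # stores advance c by cn_stride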

MobileNetV2 end-to-end benchmark:
CPU   Before After
a53   96055  96306
a55  111923 111956
a57   83247  83229
a72   61360  62700
a73   53267  53094
a75   39247  39136
a76   21868  21880
a77   20754  20812
kryo  47640  47671
m1    40545  40814
m2    44468  44656
m3    19333  19358
m4    16660  17206

PiperOrigin-RevId: 315937599
diff --git a/src/f16-gemm/6x8-aarch64-neonfp16arith-ld64.S.in b/src/f16-gemm/6x8-aarch64-neonfp16arith-ld64.S.in
index d20a49c..b4b784b 100644
--- a/src/f16-gemm/6x8-aarch64-neonfp16arith-ld64.S.in
+++ b/src/f16-gemm/6x8-aarch64-neonfp16arith-ld64.S.in
@@ -14,7 +14,7 @@
 #     const void*restrict w,    x5
 #     uint8_t*restrict c,       x6
 #     size_t cm_stride,         x7
-#     size_t cn_stride,         [sp] -> x14
+#     size_t cn_stride,         [sp] -> (x0)
 $if INC:
   #     const float*restrict acc,  [sp + 8] -> x15
   #     const union xnn_f16_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)])  [sp + 16] -> x8
@@ -35,7 +35,7 @@
 #  x6 c0
 # x16 c1
 # x17 c2
-# x18 c3
+# x14 c3
 # x13 c4
 #  x7 c5
 
@@ -61,13 +61,11 @@
 BEGIN_FUNCTION xnn_f16_gemm${"inc" if INC else ""}_minmax_ukernel_6x8__aarch64_neonfp16arith_ld64
 
         $if INC:
-          # Load cn_stride, acc
-          LDP x14, x15, [sp]
-          # Load params pointer
-          LDR x8, [sp, 16]
+          # Load acc, params pointer
+          LDP x15, x8, [sp, 8]
         $else:
-          # Load cn_stride, params pointer
-          LDP x14, x8, [sp]
+          # Load params pointer
+          LDR x8, [sp, 8]
 
         # Clamp A and C pointers
         CMP x0, 2                // if mr < 2
@@ -84,15 +82,15 @@
 
         CMP x0, 4                // if mr < 4
         ADD x11, x10, x4         // a3 = a2 + a_stride
-        ADD x18, x17, x7         // c3 = c2 + cm_stride
+        ADD x14, x17, x7         // c3 = c2 + cm_stride
         CSEL x11, x10, x11, LO   //   a3 = a2
-        CSEL x18, x17, x18, LO   //   c3 = c2
+        CSEL x14, x17, x14, LO   //   c3 = c2
 
         ADD x12, x11, x4         // a4 = a3 + a_stride
-        ADD x13, x18, x7         // c4 = c3 + cm_stride
+        ADD x13, x14, x7         // c4 = c3 + cm_stride
                                  // if mr <= 4
         CSEL x12, x11, x12, LS   //   a4 = a3
-        CSEL x13, x18, x13, LS   //   c4 = c3
+        CSEL x13, x14, x13, LS   //   c4 = c3
 
         CMP x0, 6                // if mr < 6
         ADD x4, x12, x4          // a5 = a4 + a_stride
@@ -180,13 +178,15 @@
         FMUL v26.8h, v26.8h, v6.8h
         FMUL v28.8h, v28.8h, v6.8h
         FMUL v30.8h, v30.8h, v6.8h
-        SUBS x1, x1, 8
+        # Load cn_stride
+        LDR x0, [sp, 0]
         FMAX v20.8h, v20.8h, v4.8h
         FMAX v22.8h, v22.8h, v4.8h
         FMAX v24.8h, v24.8h, v4.8h
         FMAX v26.8h, v26.8h, v4.8h
         FMAX v28.8h, v28.8h, v4.8h
         FMAX v30.8h, v30.8h, v4.8h
+        SUBS x1, x1, 8
         FMIN v20.8h, v20.8h, v5.8h
         FMIN v22.8h, v22.8h, v5.8h
         FMIN v24.8h, v24.8h, v5.8h
@@ -198,30 +198,30 @@
         B.LO 8f
 
         $if INC:
-          ST1 {v30.16b},  [x7], x14
+          ST1 {v30.16b},  [x7], x0
           SUB  x3,  x3, x2 // a0 -= kc
-          ST1 {v28.16b}, [x13], x14
+          ST1 {v28.16b}, [x13], x0
           SUB  x9,  x9, x2 // a1 -= kc
-          ST1 {v26.16b}, [x18], x14
+          ST1 {v26.16b}, [x14], x0
           SUB x10, x10, x2 // a2 -= kc
-          ST1 {v24.16b}, [x17], x14
+          ST1 {v24.16b}, [x17], x0
           SUB x11, x11, x2 // a3 -= kc
-          ST1 {v22.16b}, [x16], x14
+          ST1 {v22.16b}, [x16], x0
           SUB x12, x12, x2 // a4 -= kc
-          ST1 {v20.16b},  [x6], x14
+          ST1 {v20.16b},  [x6], x0
           SUB  x4,  x4, x2 // a5 -= kc
         $else:
-          ST1 {v20.16b},  [x6], x14
+          ST1 {v20.16b},  [x6], x0
           SUB  x3,  x3, x2 // a0 -= kc
-          ST1 {v22.16b}, [x16], x14
+          ST1 {v22.16b}, [x16], x0
           SUB  x9,  x9, x2 // a1 -= kc
-          ST1 {v24.16b}, [x17], x14
+          ST1 {v24.16b}, [x17], x0
           SUB x10, x10, x2 // a2 -= kc
-          ST1 {v26.16b}, [x18], x14
+          ST1 {v26.16b}, [x14], x0
           SUB x11, x11, x2 // a3 -= kc
-          ST1 {v28.16b}, [x13], x14
+          ST1 {v28.16b}, [x13], x0
           SUB x12, x12, x2 // a4 -= kc
-          ST1 {v30.16b},  [x7], x14
+          ST1 {v30.16b},  [x7], x0
           SUB  x4,  x4, x2 // a5 -= kc
 
         B.HI 0b
@@ -281,7 +281,7 @@
           DUP d30, v30.d[1]
           STR d28, [x13], 8
           DUP d28, v28.d[1]
-          STR d26, [x18], 8
+          STR d26, [x14], 8
           DUP d26, v26.d[1]
           STR d24, [x17], 8
           DUP d24, v24.d[1]
@@ -296,7 +296,7 @@
           DUP d22, v22.d[1]
           STR d24, [x17], 8
           DUP d24, v24.d[1]
-          STR d26, [x18], 8
+          STR d26, [x14], 8
           DUP d26, v26.d[1]
           STR d28, [x13], 8
           DUP d28, v28.d[1]
@@ -310,7 +310,7 @@
           DUP s30, v30.s[1]
           STR s28, [x13], 4
           DUP s28, v28.s[1]
-          STR s26, [x18], 4
+          STR s26, [x14], 4
           DUP s26, v26.s[1]
           STR s24, [x17], 4
           DUP s24, v24.s[1]
@@ -325,7 +325,7 @@
           DUP s22, v22.s[1]
           STR s24, [x17], 4
           DUP s24, v24.s[1]
-          STR s26, [x18], 4
+          STR s26, [x14], 4
           DUP s26, v26.s[1]
           STR s28, [x13], 4
           DUP s28, v28.s[1]
@@ -337,7 +337,7 @@
         $if INC:
           STR h30,  [x7]
           STR h28, [x13]
-          STR h26, [x18]
+          STR h26, [x14]
           STR h24, [x17]
           STR h22, [x16]
           STR h20,  [x6]
@@ -345,7 +345,7 @@
           STR h20,  [x6]
           STR h22, [x16]
           STR h24, [x17]
-          STR h26, [x18]
+          STR h26, [x14]
           STR h28, [x13]
           STR h30,  [x7]
 11: