Refactor argument names in QS8 VADD[C] microkernels

Rename the input_x/input_y arguments to input_a/input_b, and rename the derived local variables (vx* -> va*) accordingly

PiperOrigin-RevId: 385693221
diff --git a/src/qs8-vaddc/avx2-mul32-ld64.c.in b/src/qs8-vaddc/avx2-mul32-ld64.c.in
index 85bf12f..dff3298 100644
--- a/src/qs8-vaddc/avx2-mul32-ld64.c.in
+++ b/src/qs8-vaddc/avx2-mul32-ld64.c.in
@@ -16,12 +16,12 @@
 
 void xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x${BATCH_TILE}(
     size_t n,
-    const int8_t* input_x,
-    const int8_t* input_y,
+    const int8_t* input_a,
+    const int8_t* input_b,
     int8_t* output,
     const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
 {
-  const __m256i vx_multiplier = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.x_multiplier));
+  const __m256i va_multiplier = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.x_multiplier));
   const __m256i vremainder_mask = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.remainder_mask));
   const __m256i vremainder_threshold = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.remainder_threshold));
   const __m128i vshift = _mm_cvtsi32_si128((int) params->sse2.shift);
@@ -35,16 +35,16 @@
     const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
 
   __m256i vzero_point_product = _mm256_broadcastsi128_si256(_mm_add_epi32(
-    _mm_broadcastd_epi32(_mm_cvtsi32_si128(params->sse2.y_multiplier[0] * (int32_t) *input_y)),
+    _mm_broadcastd_epi32(_mm_cvtsi32_si128(params->sse2.y_multiplier[0] * (int32_t) *input_b)),
     _mm_load_si128((const __m128i*) params->sse2.zero_point_product)));
   for (; n >= ${BATCH_TILE} * sizeof(int8_t); n -= ${BATCH_TILE} * sizeof(int8_t)) {
-    const __m256i vx${ABC[0:8]} = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input_x));
+    const __m256i va${ABC[0:8]} = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input_a));
     $for N in range(8, BATCH_TILE, 8):
-      const __m256i vx${ABC[N:N+8]} = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (input_x + ${N})));
-    input_x += ${BATCH_TILE};
+      const __m256i va${ABC[N:N+8]} = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (input_a + ${N})));
+    input_a += ${BATCH_TILE};
 
     $for N in range(0, BATCH_TILE, 8):
-      __m256i vacc${ABC[N:N+8]} = _mm256_add_epi32(vzero_point_product, _mm256_mullo_epi32(vx${ABC[N:N+8]}, vx_multiplier));
+      __m256i vacc${ABC[N:N+8]} = _mm256_add_epi32(vzero_point_product, _mm256_mullo_epi32(va${ABC[N:N+8]}, va_multiplier));
 
     $for N in range(0, BATCH_TILE, 8):
       const __m256i vrem${ABC[N:N+8]} = _mm256_add_epi32(_mm256_and_si256(vacc${ABC[N:N+8]}, vremainder_mask), _mm256_srai_epi32(vacc${ABC[N:N+8]}, 31));
@@ -87,11 +87,11 @@
   }
   if XNN_UNLIKELY(n != 0) {
     ${"do " if BATCH_TILE > 8 else ""}{
-      const __m256i vx${ABC[0:8]} = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input_x));
+      const __m256i va${ABC[0:8]} = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input_a));
       $if BATCH_TILE > 8:
-        input_x += 8;
+        input_a += 8;
 
-      __m256i vacc${ABC[0:8]} = _mm256_add_epi32(vzero_point_product, _mm256_mullo_epi32(vx${ABC[0:8]}, vx_multiplier));
+      __m256i vacc${ABC[0:8]} = _mm256_add_epi32(vzero_point_product, _mm256_mullo_epi32(va${ABC[0:8]}, va_multiplier));
 
       const __m256i vrem${ABC[0:8]} = _mm256_add_epi32(_mm256_and_si256(vacc${ABC[0:8]}, vremainder_mask), _mm256_srai_epi32(vacc${ABC[0:8]}, 31));