Generalize QS8 VADD[C] templates to cover QU8 VADD[C] microkernels

- QU8 VADD[C] SSE2 MUL16 microkernels
- QU8 VADD[C] NEON microkernels
- QU8 VADD[C] WAsm SIMD microkernels
- QU8 VADD[C] scalar microkernels
- Unit tests for the new QU8 microkernels
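
All four QU8 variants share the QS8 requantization scheme: widen to 32 bits,
scale each input by a fixed-point multiplier, add a bias that folds in both
zero points, apply a rounding arithmetic right shift, clamp, and re-add the
output zero point. A minimal scalar sketch of one QU8 element, using the
parameter names from xnn_init_qu8_add_minmax_scalar_params (illustrative
code, not part of the library):

  #include <stdint.h>

  static inline uint8_t qu8_add_one(
      uint8_t a, uint8_t b,
      int32_t bias,          // -(a_multiplier * a_zero_point + b_multiplier * b_zero_point)
      int32_t a_multiplier,  // in [0, 2**21)
      int32_t b_multiplier,  // in [0, 2**21)
      uint32_t shift,        // in [12, 30]
      int32_t output_zero_point,
      int32_t output_min_less_zero_point,
      int32_t output_max_less_zero_point)
  {
    int32_t acc = bias + (int32_t) a * a_multiplier + (int32_t) b * b_multiplier;
    // Rounding arithmetic right shift; the kernels use asr_s32() because
    // >> on a negative int32_t is implementation-defined in C.
    const int32_t rounding = INT32_C(1) << (shift - 1);
    int32_t out = (acc + rounding) >> shift;
    if (out < output_min_less_zero_point) out = output_min_less_zero_point;
    if (out > output_max_less_zero_point) out = output_max_less_zero_point;
    return (uint8_t) (out + output_zero_point);
  }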

PiperOrigin-RevId: 385931044
diff --git a/src/init.c b/src/init.c
index 447f5e0..1e3385b 100644
--- a/src/init.c
+++ b/src/init.c
@@ -219,7 +219,7 @@
         .mp = (xnn_gavgpool_multipass_ukernel_function) xnn_qu8_gavgpool_minmax_ukernel_7p7x__neon_c8,
         .mr = 7,
       };
-      xnn_params.qu8.vadd = (xnn_vadd_ukernel_function) xnn_qu8_vadd_minmax_ukernel__neon_x32;
+      xnn_params.qu8.vadd = (xnn_vadd_ukernel_function) xnn_qu8_vadd_minmax_ukernel__neon_ld64_x8;
     #endif  // XNN_NO_QU8_OPERATORS
 
     /**************************** U8 micro-kernels ****************************/
@@ -1286,7 +1286,7 @@
       .mp = (xnn_gavgpool_multipass_ukernel_function) xnn_qu8_gavgpool_minmax_ukernel_7p7x__neon_c8,
       .mr = 7,
     };
-    xnn_params.qu8.vadd = (xnn_vadd_ukernel_function) xnn_qu8_vadd_minmax_ukernel__neon_x32;
+    xnn_params.qu8.vadd = (xnn_vadd_ukernel_function) xnn_qu8_vadd_minmax_ukernel__neon_ld64_x8;
   #endif  // XNN_NO_QU8_OPERATORS
 
   /**************************** U8 micro-kernels ****************************/
@@ -2255,7 +2255,7 @@
       .mp = (xnn_gavgpool_multipass_ukernel_function) xnn_qu8_gavgpool_minmax_ukernel_7p7x__sse2_c8,
       .mr = 7,
     };
-    xnn_params.qu8.vadd = (xnn_vadd_ukernel_function) xnn_qu8_vadd_minmax_ukernel__sse2_x8;
+    xnn_params.qu8.vadd = (xnn_vadd_ukernel_function) xnn_qu8_vadd_minmax_ukernel__sse2_mul16_ld64_x8;
   #endif  // XNN_NO_QU8_OPERATORS
 
   /**************************** U8 micro-kernels ****************************/
diff --git a/src/params-init.c b/src/params-init.c
index a2c3509..798de6b 100644
--- a/src/params-init.c
+++ b/src/params-init.c
@@ -1740,7 +1740,8 @@
   params->scalar.max = (int32_t) (uint32_t) output_max;
 }
 
-void xnn_init_qu8_add_minmax_params(
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+void xnn_init_qu8_add_minmax_sse2_params(
   union xnn_qu8_add_minmax_params params[XNN_MIN_ELEMENTS(1)],
   uint8_t a_zero_point,
   uint8_t b_zero_point,
@@ -1774,58 +1775,135 @@
   assert(a_multiplier < INT32_C(0x00200000));
   assert(b_multiplier < INT32_C(0x00200000));
 
-  #if XNN_ARCH_X86 || XNN_ARCH_X86_64
-    const uint32_t remainder_mask = (UINT32_C(1) << shift) - UINT32_C(1);
-    const uint32_t remainder_threshold = remainder_mask >> 1;
-    const int32_t zero_point_product =
-      (int32_t) -(a_multiplier * (uint32_t) a_zero_point + b_multiplier * (uint32_t) b_zero_point);
-    for (uint32_t i = 0; i < 4; i++) {
-      params->sse2.zero_point_product[i] = zero_point_product;
-    }
-    for (uint32_t i = 0; i < 8; i++) {
-      params->sse2.y_zero_point[i] = (int16_t) (uint16_t) output_zero_point;
-    }
-    for (uint32_t i = 0; i < 8; i++) {
-      params->sse2.a_multiplier_lo[i] = (uint16_t) (uint32_t) a_multiplier;
-      params->sse2.a_multiplier_hi[i] = (uint16_t) ((uint32_t) a_multiplier >> 16);
-      params->sse2.b_multiplier_lo[i] = (uint16_t) (uint32_t) b_multiplier;
-      params->sse2.b_multiplier_hi[i] = (uint16_t) ((uint32_t) b_multiplier >> 16);
-    }
-    params->sse2.a_multiplier = a_multiplier;
-    params->sse2.b_multiplier = b_multiplier;
-    for (uint32_t i = 0; i < 4; i++) {
-      params->sse2.remainder_mask[i] = remainder_mask;
-      params->sse2.remainder_threshold[i] = remainder_threshold;
-    }
-    params->sse2.shift = shift;
-    for (uint32_t i = 0; i < 16; i++) {
-      params->sse2.y_min[i] = output_min;
-      params->sse2.y_max[i] = output_max;
-    }
-  #elif XNN_ARCH_ARM || XNN_ARCH_ARM64
-    params->neon.a_zero_point = a_zero_point;
-    params->neon.b_zero_point = b_zero_point;
-    params->neon.y_zero_point = (int16_t) (uint16_t) output_zero_point;
-    params->neon.a_multiplier = (int32_t) a_multiplier;
-    params->neon.b_multiplier = (int32_t) b_multiplier;
-    params->neon.right_shift = (int32_t) -shift;
-    params->neon.y_min = output_min;
-    params->neon.y_max = output_max;
-  #else
-    const uint32_t remainder_mask = (UINT32_C(1) << shift) - UINT32_C(1);
-    const uint32_t remainder_threshold = remainder_mask >> 1;
-    params->scalar.zero_point_product =
-      (int32_t) -(a_multiplier * (uint32_t) a_zero_point + b_multiplier * (uint32_t) b_zero_point);
-    params->scalar.a_multiplier = a_multiplier;
-    params->scalar.b_multiplier = b_multiplier;
-    params->scalar.remainder_mask = (int32_t) remainder_mask;
-    params->scalar.remainder_threshold = (int32_t) remainder_threshold;
-    params->scalar.shift = shift;
-    params->scalar.y_zero_point = (int32_t) (uint32_t) output_zero_point;
-    params->scalar.y_min = (int32_t) (uint32_t) output_min;
-    params->scalar.y_max = (int32_t) (uint32_t) output_max;
-  #endif
+  const int32_t rounding = INT32_C(1) << (shift - 1);
+  const int32_t bias = (int32_t) -(a_multiplier * (int32_t) a_zero_point + b_multiplier * (int32_t) b_zero_point);
+  for (uint32_t i = 0; i < 4; i++) {
+    params->sse2.bias[i] = bias;
+  }
+  const uint16_t a_multiplier_lo = (uint16_t) a_multiplier;
+  const uint16_t a_multiplier_hi = (uint16_t) ((uint32_t) a_multiplier >> 16);
+  const uint16_t b_multiplier_lo = (uint16_t) b_multiplier;
+  const uint16_t b_multiplier_hi = (uint16_t) ((uint32_t) b_multiplier >> 16);
+  for (uint32_t i = 0; i < 8; i++) {
+    params->sse2.a_multiplier_lo[i] = a_multiplier_lo;
+    params->sse2.a_multiplier_hi[i] = a_multiplier_hi;
+    params->sse2.b_multiplier_lo[i] = b_multiplier_lo;
+    params->sse2.b_multiplier_hi[i] = b_multiplier_hi;
+  }
+  params->sse2.shift = shift;
+  params->sse2.b_multiplier = (uint32_t) b_multiplier;
+  for (uint32_t i = 0; i < 4; i++) {
+    params->sse2.rounding[i] = rounding;
+  }
+  for (uint32_t i = 0; i < 8; i++) {
+    params->sse2.output_zero_point[i] = (int16_t) (uint16_t) output_zero_point;
+  }
+  for (uint32_t i = 0; i < 16; i++) {
+    params->sse2.output_min[i] = output_min;
+    params->sse2.output_max[i] = output_max;
+  }
 }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+void xnn_init_qu8_add_minmax_neon_params(
+  union xnn_qu8_add_minmax_params params[XNN_MIN_ELEMENTS(1)],
+  uint8_t a_zero_point,
+  uint8_t b_zero_point,
+  uint8_t output_zero_point,
+  float a_output_scale,
+  float b_output_scale,
+  uint8_t output_min,
+  uint8_t output_max)
+{
+  assert(a_output_scale >= 0x1.0p-10f);
+  assert(b_output_scale >= 0x1.0p-10f);
+  assert(a_output_scale < 0x1.0p+8f);
+  assert(b_output_scale < 0x1.0p+8f);
+
+  // Compute requantization parameters.
+  const float max_output_scale = math_max_f32(a_output_scale, b_output_scale);
+  assert(max_output_scale >= 0x1.0p-10f);
+  assert(max_output_scale < 0x1.0p+8f);
+  const uint32_t max_scale_bits = fp32_to_bits(max_output_scale);
+  const int32_t max_scale_exponent = (int32_t) (max_scale_bits >> 23) - 127;
+
+  // Shift is in [12, 30] range.
+  const uint32_t shift = (uint32_t) (20 /* multiplier bits */ - max_scale_exponent);
+  assert(shift <= 30);
+  assert(shift >= 12);
+
+  // Multipliers are in the [0, 2**21) range; the largest multiplier is in the [2**20, 2**21) range.
+  const int32_t a_multiplier = (int32_t) lrintf(fp32_from_bits(fp32_to_bits(a_output_scale) + (shift << 23)));
+  const int32_t b_multiplier = (int32_t) lrintf(fp32_from_bits(fp32_to_bits(b_output_scale) + (shift << 23)));
+  assert(math_max_s32(a_multiplier, b_multiplier) >= INT32_C(0x00100000));
+  assert(a_multiplier < INT32_C(0x00200000));
+  assert(b_multiplier < INT32_C(0x00200000));
+
+  params->neon.a_zero_point = a_zero_point;
+  params->neon.b_zero_point = b_zero_point;
+  params->neon.a_multiplier = (int32_t) a_multiplier;
+  params->neon.b_multiplier = (int32_t) b_multiplier;
+  params->neon.right_shift = (int32_t) -shift;
+  params->neon.output_zero_point = (int16_t) (uint16_t) output_zero_point;
+  params->neon.output_min = output_min;
+  params->neon.output_max = output_max;
+}
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+#if XNN_ARCH_WASMSIMD
+void xnn_init_qu8_add_minmax_wasmsimd_params(
+  union xnn_qu8_add_minmax_params params[XNN_MIN_ELEMENTS(1)],
+  uint8_t a_zero_point,
+  uint8_t b_zero_point,
+  uint8_t output_zero_point,
+  float a_output_scale,
+  float b_output_scale,
+  uint8_t output_min,
+  uint8_t output_max)
+{
+  assert(a_output_scale >= 0x1.0p-10f);
+  assert(b_output_scale >= 0x1.0p-10f);
+  assert(a_output_scale < 0x1.0p+8f);
+  assert(b_output_scale < 0x1.0p+8f);
+
+  // Compute requantization parameters.
+  const float max_output_scale = math_max_f32(a_output_scale, b_output_scale);
+  assert(max_output_scale >= 0x1.0p-10f);
+  assert(max_output_scale < 0x1.0p+8f);
+  const uint32_t max_scale_bits = fp32_to_bits(max_output_scale);
+  const int32_t max_scale_exponent = (int32_t) (max_scale_bits >> 23) - 127;
+
+  // Shift is in [12, 30] range.
+  const uint32_t shift = (uint32_t) (20 /* multiplier bits */ - max_scale_exponent);
+  assert(shift <= 30);
+  assert(shift >= 12);
+
+  // Multipliers are in the [0, 2**21) range; the largest multiplier is in the [2**20, 2**21) range.
+  const int32_t a_multiplier = (int32_t) lrintf(fp32_from_bits(fp32_to_bits(a_output_scale) + (shift << 23)));
+  const int32_t b_multiplier = (int32_t) lrintf(fp32_from_bits(fp32_to_bits(b_output_scale) + (shift << 23)));
+  assert(math_max_s32(a_multiplier, b_multiplier) >= INT32_C(0x00100000));
+  assert(a_multiplier < INT32_C(0x00200000));
+  assert(b_multiplier < INT32_C(0x00200000));
+
+  const int32_t rounding = INT32_C(1) << (shift - 1);
+  const int32_t bias = (int32_t) -(a_multiplier * (int32_t) (uint32_t) a_zero_point + b_multiplier * (int32_t) (uint32_t) b_zero_point);
+  for (uint32_t i = 0; i < 4; i++) {
+    params->wasmsimd.bias[i] = bias;
+    params->wasmsimd.a_multiplier[i] = a_multiplier;
+    params->wasmsimd.b_multiplier[i] = b_multiplier;
+    params->wasmsimd.rounding[i] = rounding;
+  }
+  params->wasmsimd.shift = shift;
+  for (uint32_t i = 0; i < 8; i++) {
+    params->wasmsimd.output_zero_point[i] = (int16_t) (uint16_t) output_zero_point;
+  }
+  for (uint32_t i = 0; i < 16; i++) {
+    params->wasmsimd.output_min[i] = output_min;
+    params->wasmsimd.output_max[i] = output_max;
+  }
+}
+#endif  // XNN_ARCH_WASMSIMD
 
 void xnn_init_qu8_add_minmax_scalar_params(
   union xnn_qu8_add_minmax_params params[XNN_MIN_ELEMENTS(1)],
@@ -1861,18 +1939,15 @@
   assert(a_multiplier < INT32_C(0x00200000));
   assert(b_multiplier < INT32_C(0x00200000));
 
-  const uint32_t remainder_mask = (UINT32_C(1) << shift) - UINT32_C(1);
-  const uint32_t remainder_threshold = remainder_mask >> 1;
-  params->scalar.zero_point_product =
-    (int32_t) -(a_multiplier * (uint32_t) a_zero_point + b_multiplier * (uint32_t) b_zero_point);
+  const int32_t rounding = INT32_C(1) << (shift - 1);
+  params->scalar.bias = (int32_t) -(a_multiplier * (int32_t) (uint32_t) a_zero_point + b_multiplier * (int32_t) (uint32_t) b_zero_point);
   params->scalar.a_multiplier = a_multiplier;
   params->scalar.b_multiplier = b_multiplier;
-  params->scalar.remainder_mask = (int32_t) remainder_mask;
-  params->scalar.remainder_threshold = (int32_t) remainder_threshold;
+  params->scalar.rounding = rounding;
   params->scalar.shift = shift;
-  params->scalar.y_zero_point = (int32_t) (uint32_t) output_zero_point;
-  params->scalar.y_min = (int32_t) (uint32_t) output_min;
-  params->scalar.y_max = (int32_t) (uint32_t) output_max;
+  params->scalar.output_min_less_zero_point = (int32_t) (uint32_t) output_min - (int32_t) (uint32_t) output_zero_point;
+  params->scalar.output_max_less_zero_point = (int32_t) (uint32_t) output_max - (int32_t) (uint32_t) output_zero_point;
+  params->scalar.output_zero_point = (int32_t) (uint32_t) output_zero_point;
 }
 
 #if XNN_ARCH_X86 || XNN_ARCH_X86_64
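
The requantization setup above maps float scales to integer multipliers via
the IEEE-754 layout: adding (shift << 23) to a float's bit pattern multiplies
it by 2**shift, so each multiplier is round(scale * 2**shift), with shift
chosen to land the larger multiplier in [2**20, 2**21). A standalone rework
of that derivation (hedged sketch; helper names are illustrative):

  #include <assert.h>
  #include <math.h>
  #include <stdint.h>
  #include <string.h>

  static uint32_t fp32_bits(float f) { uint32_t u; memcpy(&u, &f, sizeof u); return u; }
  static float fp32_value(uint32_t u) { float f; memcpy(&f, &u, sizeof f); return f; }

  int main(void) {
    const float a_scale = 0.5f, b_scale = 1.0f;
    const float max_scale = fmaxf(a_scale, b_scale);
    // Unbiased exponent of the larger scale: (bits >> 23) - 127 = 0 for 1.0f.
    const int32_t max_exponent = (int32_t) (fp32_bits(max_scale) >> 23) - 127;
    const uint32_t shift = (uint32_t) (20 - max_exponent);  // 20
    // Bit-pattern trick: bits + (shift << 23) scales the float by 2**shift.
    const int32_t a_mult = (int32_t) lrintf(fp32_value(fp32_bits(a_scale) + (shift << 23)));
    const int32_t b_mult = (int32_t) lrintf(fp32_value(fp32_bits(b_scale) + (shift << 23)));
    assert(a_mult == 524288);   // 0.5 * 2**20
    assert(b_mult == 1048576);  // 1.0 * 2**20
    return 0;
  }
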
diff --git a/src/qs8-vadd/avx2-mul32-ld64.c.in b/src/qs8-vadd/avx2-mul32-ld64.c.in
index b45cc3e..89a0a6c 100644
--- a/src/qs8-vadd/avx2-mul32-ld64.c.in
+++ b/src/qs8-vadd/avx2-mul32-ld64.c.in
@@ -3,6 +3,7 @@
 // This source code is licensed under the BSD-style license found in the
 // LICENSE file in the root directory of this source tree.
 
+$assert DATATYPE in ["QS8", "QU8"]
 $assert BATCH_TILE % 8 == 0
 $assert BATCH_TILE >= 8
 $ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
diff --git a/src/qs8-vadd/gen/minmax-scalar-x2.c b/src/qs8-vadd/gen/minmax-scalar-x2.c
index be9be21..f8c6e48 100644
--- a/src/qs8-vadd/gen/minmax-scalar-x2.c
+++ b/src/qs8-vadd/gen/minmax-scalar-x2.c
@@ -55,8 +55,8 @@
     vout0 += voutput_zero_point;
     vout1 += voutput_zero_point;
 
-    output[0] = vout0;
-    output[1] = vout1;
+    output[0] = (int8_t) vout0;
+    output[1] = (int8_t) vout1;
     output += 2;
   }
   if XNN_UNLIKELY(n != 0) {
diff --git a/src/qs8-vadd/gen/minmax-scalar-x4.c b/src/qs8-vadd/gen/minmax-scalar-x4.c
index 3b7a969..b0e0c06 100644
--- a/src/qs8-vadd/gen/minmax-scalar-x4.c
+++ b/src/qs8-vadd/gen/minmax-scalar-x4.c
@@ -71,10 +71,10 @@
     vout2 += voutput_zero_point;
     vout3 += voutput_zero_point;
 
-    output[0] = vout0;
-    output[1] = vout1;
-    output[2] = vout2;
-    output[3] = vout3;
+    output[0] = (int8_t) vout0;
+    output[1] = (int8_t) vout1;
+    output[2] = (int8_t) vout2;
+    output[3] = (int8_t) vout3;
     output += 4;
   }
   if XNN_UNLIKELY(n != 0) {
diff --git a/src/qs8-vadd/gen/minmax-sse2-mul16-ld64-x16.c b/src/qs8-vadd/gen/minmax-sse2-mul16-ld64-x16.c
index 7ca8fd9..f0aaf7f 100644
--- a/src/qs8-vadd/gen/minmax-sse2-mul16-ld64-x16.c
+++ b/src/qs8-vadd/gen/minmax-sse2-mul16-ld64-x16.c
@@ -141,7 +141,7 @@
           output += 4;
         }
         if (n & (2 * sizeof(int8_t))) {
-          *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout0123456701234567, 0);
+          *((uint16_t*) output) = (uint16_t) _mm_cvtsi128_si32(vout0123456701234567);
           vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
           output += 2;
         }
diff --git a/src/qs8-vadd/gen/minmax-sse2-mul16-ld64-x24.c b/src/qs8-vadd/gen/minmax-sse2-mul16-ld64-x24.c
index 2be3376..a0ec3e7 100644
--- a/src/qs8-vadd/gen/minmax-sse2-mul16-ld64-x24.c
+++ b/src/qs8-vadd/gen/minmax-sse2-mul16-ld64-x24.c
@@ -164,7 +164,7 @@
           output += 4;
         }
         if (n & (2 * sizeof(int8_t))) {
-          *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout0123456701234567, 0);
+          *((uint16_t*) output) = (uint16_t) _mm_cvtsi128_si32(vout0123456701234567);
           vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
           output += 2;
         }
diff --git a/src/qs8-vadd/gen/minmax-sse2-mul16-ld64-x32.c b/src/qs8-vadd/gen/minmax-sse2-mul16-ld64-x32.c
index 8868d51..5dafcdc 100644
--- a/src/qs8-vadd/gen/minmax-sse2-mul16-ld64-x32.c
+++ b/src/qs8-vadd/gen/minmax-sse2-mul16-ld64-x32.c
@@ -185,7 +185,7 @@
           output += 4;
         }
         if (n & (2 * sizeof(int8_t))) {
-          *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout0123456701234567, 0);
+          *((uint16_t*) output) = (uint16_t) _mm_cvtsi128_si32(vout0123456701234567);
           vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
           output += 2;
         }
diff --git a/src/qs8-vadd/gen/minmax-sse2-mul16-ld64-x8.c b/src/qs8-vadd/gen/minmax-sse2-mul16-ld64-x8.c
index 941da0b..8e1ae85 100644
--- a/src/qs8-vadd/gen/minmax-sse2-mul16-ld64-x8.c
+++ b/src/qs8-vadd/gen/minmax-sse2-mul16-ld64-x8.c
@@ -113,12 +113,12 @@
         output += 4;
       }
       if (n & (2 * sizeof(int8_t))) {
-        *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout0123456701234567, 0);
+        *((uint16_t*) output) = (uint16_t) _mm_cvtsi128_si32(vout0123456701234567);
         vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
         output += 2;
       }
       if (n & (1 * sizeof(int8_t))) {
-        *output = (int32_t) _mm_cvtsi128_si32(vout0123456701234567);
+        *output = (int8_t) _mm_cvtsi128_si32(vout0123456701234567);
       }
     }
   }
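
Replacing _mm_extract_epi16 (pextrw) with _mm_cvtsi128_si32 (movd) in the
two-byte tail stores is behavior-preserving: both read lane 0, and the result
is truncated to uint16_t either way; presumably movd is simply the cheaper
way to move the low lanes to a general-purpose register. A quick equivalence
check (illustrative, not library code):

  #include <assert.h>
  #include <emmintrin.h>
  #include <stdint.h>

  int main(void) {
    const __m128i v = _mm_set_epi32(0, 0, 0, 0x44332211);
    const uint16_t via_pextrw = (uint16_t) _mm_extract_epi16(v, 0);  // 0x2211
    const uint16_t via_movd = (uint16_t) _mm_cvtsi128_si32(v);       // 0x2211
    assert(via_pextrw == via_movd);
    return 0;
  }
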
diff --git a/src/qs8-vadd/gen/minmax-wasmsimd-x16.c b/src/qs8-vadd/gen/minmax-wasmsimd-x16.c
index 9b54ec6..75c298f 100644
--- a/src/qs8-vadd/gen/minmax-wasmsimd-x16.c
+++ b/src/qs8-vadd/gen/minmax-wasmsimd-x16.c
@@ -103,7 +103,7 @@
           output += 2;
         }
         if (n & (1 * sizeof(int8_t))) {
-          *output = wasm_i8x16_extract_lane(vout0123456701234567, 0);
+          *output = (int8_t) wasm_i8x16_extract_lane(vout0123456701234567, 0);
         }
         n = 0;
       }
diff --git a/src/qs8-vadd/gen/minmax-wasmsimd-x24.c b/src/qs8-vadd/gen/minmax-wasmsimd-x24.c
index 7440a68..c229fe0 100644
--- a/src/qs8-vadd/gen/minmax-wasmsimd-x24.c
+++ b/src/qs8-vadd/gen/minmax-wasmsimd-x24.c
@@ -116,7 +116,7 @@
           output += 2;
         }
         if (n & (1 * sizeof(int8_t))) {
-          *output = wasm_i8x16_extract_lane(vout0123456701234567, 0);
+          *output = (int8_t) wasm_i8x16_extract_lane(vout0123456701234567, 0);
         }
         n = 0;
       }
diff --git a/src/qs8-vadd/gen/minmax-wasmsimd-x32.c b/src/qs8-vadd/gen/minmax-wasmsimd-x32.c
index 7658242..18774e9 100644
--- a/src/qs8-vadd/gen/minmax-wasmsimd-x32.c
+++ b/src/qs8-vadd/gen/minmax-wasmsimd-x32.c
@@ -125,7 +125,7 @@
           output += 2;
         }
         if (n & (1 * sizeof(int8_t))) {
-          *output = wasm_i8x16_extract_lane(vout0123456701234567, 0);
+          *output = (int8_t) wasm_i8x16_extract_lane(vout0123456701234567, 0);
         }
         n = 0;
       }
diff --git a/src/qs8-vadd/gen/minmax-wasmsimd-x8.c b/src/qs8-vadd/gen/minmax-wasmsimd-x8.c
index 9a43fef..03fef74 100644
--- a/src/qs8-vadd/gen/minmax-wasmsimd-x8.c
+++ b/src/qs8-vadd/gen/minmax-wasmsimd-x8.c
@@ -87,7 +87,7 @@
         output += 2;
       }
       if (n & (1 * sizeof(int8_t))) {
-        *output = wasm_i8x16_extract_lane(vout0123456701234567, 0);
+        *output = (int8_t) wasm_i8x16_extract_lane(vout0123456701234567, 0);
       }
     }
   }
diff --git a/src/qs8-vadd/neon-ld64.c.in b/src/qs8-vadd/neon-ld64.c.in
index 609f6ec..cb3bcbc 100644
--- a/src/qs8-vadd/neon-ld64.c.in
+++ b/src/qs8-vadd/neon-ld64.c.in
@@ -3,6 +3,7 @@
 // This source code is licensed under the BSD-style license found in the
 // LICENSE file in the root directory of this source tree.
 
+$assert DATATYPE in ["QS8", "QU8"]
 $assert BATCH_TILE % 8 == 0
 $assert BATCH_TILE >= 8
 $ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
@@ -13,30 +14,53 @@
 #include <xnnpack/vadd.h>
 
 
-void xnn_qs8_vadd_minmax_ukernel__neon_ld64_x${BATCH_TILE}(
+$XINT8_T = {"QS8": "int8_t", "QU8": "uint8_t"}[DATATYPE]
+$XINT8X8_T = {"QS8": "int8x8_t", "QU8": "uint8x8_t"}[DATATYPE]
+$XINT8X16_T = {"QS8": "int8x16_t", "QU8": "uint8x16_t"}[DATATYPE]
+$VLD1_X8 = {"QS8": "vld1_s8", "QU8": "vld1_u8"}[DATATYPE]
+$VLD1_DUP_X8 = {"QS8": "vld1_dup_s8", "QU8": "vld1_dup_u8"}[DATATYPE]
+$VLD1Q_DUP_X8 = {"QS8": "vld1q_dup_s8", "QU8": "vld1q_dup_u8"}[DATATYPE]
+$VST1_LANE_X8 = {"QS8": "vst1_lane_s8", "QU8": "vst1_lane_u8"}[DATATYPE]
+$VST1_X8 = {"QS8": "vst1_s8", "QU8": "vst1_u8"}[DATATYPE]
+$VST1Q_X8 = {"QS8": "vst1q_s8", "QU8": "vst1q_u8"}[DATATYPE]
+$VMIN_X8 = {"QS8": "vmin_s8", "QU8": "vmin_u8"}[DATATYPE]
+$VMAX_X8 = {"QS8": "vmax_s8", "QU8": "vmax_u8"}[DATATYPE]
+$VMINQ_X8 = {"QS8": "vminq_s8", "QU8": "vminq_u8"}[DATATYPE]
+$VMAXQ_X8 = {"QS8": "vmaxq_s8", "QU8": "vmaxq_u8"}[DATATYPE]
+$VQMOVXN_S16 = {"QS8": "vqmovn_s16", "QU8": "vqmovun_s16"}[DATATYPE]
+$VEXT_X8 = {"QS8": "vext_s8", "QU8": "vext_u8"}[DATATYPE]
+$VGET_LOW_X8 = {"QS8": "vget_low_s8", "QU8": "vget_low_u8"}[DATATYPE]
+$VCOMBINE_X8 = {"QS8": "vcombine_s8", "QU8": "vcombine_u8"}[DATATYPE]
+$VREINTERPRET_U32_X8 = {"QS8": "vreinterpret_u32_s8", "QU8": "vreinterpret_u32_u8"}[DATATYPE]
+$VREINTERPRET_U16_X8 = {"QS8": "vreinterpret_u16_s8", "QU8": "vreinterpret_u16_u8"}[DATATYPE]
+void xnn_${DATATYPE.lower()}_vadd_minmax_ukernel__neon_ld64_x${BATCH_TILE}(
     size_t n,
-    const int8_t* input_a,
-    const int8_t* input_b,
-    int8_t* output,
-    const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+    const ${XINT8_T}* input_a,
+    const ${XINT8_T}* input_b,
+    ${XINT8_T}* output,
+    const union xnn_${DATATYPE.lower()}_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
 {
-  const int8x8_t va_zero_point = vld1_dup_s8(&params->neon.a_zero_point);
-  const int8x8_t vb_zero_point = vld1_dup_s8(&params->neon.b_zero_point);
+  const ${XINT8X8_T} va_zero_point = ${VLD1_DUP_X8}(&params->neon.a_zero_point);
+  const ${XINT8X8_T} vb_zero_point = ${VLD1_DUP_X8}(&params->neon.b_zero_point);
   const int32x4_t va_multiplier = vld1q_dup_s32(&params->neon.a_multiplier);
   const int32x4_t vb_multiplier = vld1q_dup_s32(&params->neon.b_multiplier);
   const int32x4_t vright_shift = vld1q_dup_s32(&params->neon.right_shift);
   const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->neon.output_zero_point);
-  const int8x16_t voutput_min = vld1q_dup_s8(&params->neon.output_min);
-  const int8x16_t voutput_max = vld1q_dup_s8(&params->neon.output_max);
+  const ${XINT8X16_T} voutput_min = ${VLD1Q_DUP_X8}(&params->neon.output_min);
+  const ${XINT8X16_T} voutput_max = ${VLD1Q_DUP_X8}(&params->neon.output_max);
 
-  for (; n >= ${BATCH_TILE} * sizeof(int8_t); n -= ${BATCH_TILE} * sizeof(int8_t)) {
+  for (; n >= ${BATCH_TILE} * sizeof(${XINT8_T}); n -= ${BATCH_TILE} * sizeof(${XINT8_T})) {
     $for N in range(0, BATCH_TILE, 8):
-      const int8x8_t va${ABC[N:N+8]} = vld1_s8(input_a); input_a += 8;
-      const int8x8_t vb${ABC[N:N+8]} = vld1_s8(input_b); input_b += 8;
+      const ${XINT8X8_T} va${ABC[N:N+8]} = ${VLD1_X8}(input_a); input_a += 8;
+      const ${XINT8X8_T} vb${ABC[N:N+8]} = ${VLD1_X8}(input_b); input_b += 8;
 
     $for N in range(0, BATCH_TILE, 8):
-      const int16x8_t vxa${ABC[N:N+8]} = vsubl_s8(va${ABC[N:N+8]}, va_zero_point);
-      const int16x8_t vxb${ABC[N:N+8]} = vsubl_s8(vb${ABC[N:N+8]}, vb_zero_point);
+      $if DATATYPE == "QU8":
+        const int16x8_t vxa${ABC[N:N+8]} = vreinterpretq_s16_u16(vsubl_u8(va${ABC[N:N+8]}, va_zero_point));
+        const int16x8_t vxb${ABC[N:N+8]} = vreinterpretq_s16_u16(vsubl_u8(vb${ABC[N:N+8]}, vb_zero_point));
+      $else:
+        const int16x8_t vxa${ABC[N:N+8]} = vsubl_s8(va${ABC[N:N+8]}, va_zero_point);
+        const int16x8_t vxb${ABC[N:N+8]} = vsubl_s8(vb${ABC[N:N+8]}, vb_zero_point);
 
     $for N in range(0, BATCH_TILE, 8):
       int32x4_t vacc${ABC[N:N+4]} = vmulq_s32(vmovl_s16(vget_low_s16(vxa${ABC[N:N+8]})), va_multiplier);
@@ -54,39 +78,43 @@
 
     $for N in range(0, BATCH_TILE, 16):
       $if N + 8 < BATCH_TILE:
-        int8x16_t vout${ABC[N:N+16]} = vcombine_s8(vqmovn_s16(vacc${ABC[N:N+8]}), vqmovn_s16(vacc${ABC[N+8:N+16]}));
+        ${XINT8X16_T} vout${ABC[N:N+16]} = ${VCOMBINE_X8}(${VQMOVXN_S16}(vacc${ABC[N:N+8]}), ${VQMOVXN_S16}(vacc${ABC[N+8:N+16]}));
       $else:
-        int8x8_t vout${ABC[N:N+8]} = vqmovn_s16(vacc${ABC[N:N+8]});
+        ${XINT8X8_T} vout${ABC[N:N+8]} = ${VQMOVXN_S16}(vacc${ABC[N:N+8]});
 
     $for N in range(0, BATCH_TILE, 16):
       $if N + 8 < BATCH_TILE:
-        vout${ABC[N:N+16]} = vmaxq_s8(vout${ABC[N:N+16]}, voutput_min);
+        vout${ABC[N:N+16]} = ${VMAXQ_X8}(vout${ABC[N:N+16]}, voutput_min);
       $else:
-        vout${ABC[N:N+8]} = vmax_s8(vout${ABC[N:N+8]}, vget_low_s8(voutput_min));
+        vout${ABC[N:N+8]} = ${VMAX_X8}(vout${ABC[N:N+8]}, ${VGET_LOW_X8}(voutput_min));
 
     $for N in range(0, BATCH_TILE, 16):
       $if N + 8 < BATCH_TILE:
-        vout${ABC[N:N+16]} = vminq_s8(vout${ABC[N:N+16]}, voutput_max);
+        vout${ABC[N:N+16]} = ${VMINQ_X8}(vout${ABC[N:N+16]}, voutput_max);
       $else:
-        vout${ABC[N:N+8]} = vmin_s8(vout${ABC[N:N+8]}, vget_low_s8(voutput_max));
+        vout${ABC[N:N+8]} = ${VMIN_X8}(vout${ABC[N:N+8]}, ${VGET_LOW_X8}(voutput_max));
 
     $for N in range(0, BATCH_TILE, 16):
       $if N + 8 < BATCH_TILE:
-        vst1q_s8(output, vout${ABC[N:N+16]}); output += 16;
+        ${VST1Q_X8}(output, vout${ABC[N:N+16]}); output += 16;
       $else:
-        vst1_s8(output, vout${ABC[N:N+8]}); output += 8;
+        ${VST1_X8}(output, vout${ABC[N:N+8]}); output += 8;
   }
   if XNN_UNLIKELY(n != 0) {
     ${"do " if BATCH_TILE > 8 else ""}{
       $if BATCH_TILE > 8:
-        const int8x8_t va${ABC[0:8]} = vld1_s8(input_a); input_a += 8;
-        const int8x8_t vb${ABC[0:8]} = vld1_s8(input_b); input_b += 8;
+        const ${XINT8X8_T} va${ABC[0:8]} = ${VLD1_X8}(input_a); input_a += 8;
+        const ${XINT8X8_T} vb${ABC[0:8]} = ${VLD1_X8}(input_b); input_b += 8;
       $else:
-        const int8x8_t va${ABC[0:8]} = vld1_s8(input_a);
-        const int8x8_t vb${ABC[0:8]} = vld1_s8(input_b);
+        const ${XINT8X8_T} va${ABC[0:8]} = ${VLD1_X8}(input_a);
+        const ${XINT8X8_T} vb${ABC[0:8]} = ${VLD1_X8}(input_b);
 
-      const int16x8_t vxa${ABC[0:8]} = vsubl_s8(va${ABC[0:8]}, va_zero_point);
-      const int16x8_t vxb${ABC[0:8]} = vsubl_s8(vb${ABC[0:8]}, vb_zero_point);
+      $if DATATYPE == "QU8":
+        const int16x8_t vxa${ABC[0:8]} = vreinterpretq_s16_u16(vsubl_u8(va${ABC[0:8]}, va_zero_point));
+        const int16x8_t vxb${ABC[0:8]} = vreinterpretq_s16_u16(vsubl_u8(vb${ABC[0:8]}, vb_zero_point));
+      $else:
+        const int16x8_t vxa${ABC[0:8]} = vsubl_s8(va${ABC[0:8]}, va_zero_point);
+        const int16x8_t vxb${ABC[0:8]} = vsubl_s8(vb${ABC[0:8]}, vb_zero_point);
 
       int32x4_t vacc${ABC[0:4]} = vmulq_s32(vmovl_s16(vget_low_s16(vxa${ABC[0:8]})), va_multiplier);
       int32x4_t vacc${ABC[4:8]} = vmulq_s32(vmovl_s16(vget_high_s16(vxa${ABC[0:8]})), va_multiplier);
@@ -99,39 +127,39 @@
 
       const int16x8_t vacc${ABC[0:8]} = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc${ABC[0:4]}), vqmovn_s32(vacc${ABC[4:8]})), voutput_zero_point);
 
-      int8x8_t vout${ABC[0:8]} = vqmovn_s16(vacc${ABC[0:8]});
-      vout${ABC[0:8]} = vmax_s8(vout${ABC[0:8]}, vget_low_s8(voutput_min));
-      vout${ABC[0:8]} = vmin_s8(vout${ABC[0:8]}, vget_low_s8(voutput_max));
+      ${XINT8X8_T} vout${ABC[0:8]} = ${VQMOVXN_S16}(vacc${ABC[0:8]});
+      vout${ABC[0:8]} = ${VMAX_X8}(vout${ABC[0:8]}, ${VGET_LOW_X8}(voutput_min));
+      vout${ABC[0:8]} = ${VMIN_X8}(vout${ABC[0:8]}, ${VGET_LOW_X8}(voutput_max));
 
       $if BATCH_TILE > 8:
-        if XNN_LIKELY(n >= (8 * sizeof(int8_t))) {
-          vst1_s8(output, vout${ABC[0:8]}); output += 8;
-          n -= 8 * sizeof(int8_t);
+        if XNN_LIKELY(n >= (8 * sizeof(${XINT8_T}))) {
+          ${VST1_X8}(output, vout${ABC[0:8]}); output += 8;
+          n -= 8 * sizeof(${XINT8_T});
         } else {
-          if (n & (4 * sizeof(int8_t))) {
-            vst1_lane_u32(__builtin_assume_aligned(output, 1), vreinterpret_u32_s8(vout${ABC[0:8]}), 0); output += 4;
-            vout${ABC[0:8]} = vext_s8(vout${ABC[0:8]}, vout${ABC[0:8]}, 4);
+          if (n & (4 * sizeof(${XINT8_T}))) {
+            vst1_lane_u32(__builtin_assume_aligned(output, 1), ${VREINTERPRET_U32_X8}(vout${ABC[0:8]}), 0); output += 4;
+            vout${ABC[0:8]} = ${VEXT_X8}(vout${ABC[0:8]}, vout${ABC[0:8]}, 4);
           }
-          if (n & (2 * sizeof(int8_t))) {
-            vst1_lane_u16(__builtin_assume_aligned(output, 1), vreinterpret_u16_s8(vout${ABC[0:8]}), 0); output += 2;
-            vout${ABC[0:8]} = vext_s8(vout${ABC[0:8]}, vout${ABC[0:8]}, 2);
+          if (n & (2 * sizeof(${XINT8_T}))) {
+            vst1_lane_u16(__builtin_assume_aligned(output, 1), ${VREINTERPRET_U16_X8}(vout${ABC[0:8]}), 0); output += 2;
+            vout${ABC[0:8]} = ${VEXT_X8}(vout${ABC[0:8]}, vout${ABC[0:8]}, 2);
           }
-          if (n & (1 * sizeof(int8_t))) {
-            vst1_lane_s8(output, vout${ABC[0:8]}, 0);
+          if (n & (1 * sizeof(${XINT8_T}))) {
+            ${VST1_LANE_X8}(output, vout${ABC[0:8]}, 0);
           }
           n = 0;
         }
       $else:
-        if (n & (4 * sizeof(int8_t))) {
-          vst1_lane_u32(__builtin_assume_aligned(output, 1), vreinterpret_u32_s8(vout${ABC[0:8]}), 0); output += 4;
-          vout${ABC[0:8]} = vext_s8(vout${ABC[0:8]}, vout${ABC[0:8]}, 4);
+        if (n & (4 * sizeof(${XINT8_T}))) {
+          vst1_lane_u32(__builtin_assume_aligned(output, 1), ${VREINTERPRET_U32_X8}(vout${ABC[0:8]}), 0); output += 4;
+          vout${ABC[0:8]} = ${VEXT_X8}(vout${ABC[0:8]}, vout${ABC[0:8]}, 4);
         }
-        if (n & (2 * sizeof(int8_t))) {
-          vst1_lane_u16(__builtin_assume_aligned(output, 1), vreinterpret_u16_s8(vout${ABC[0:8]}), 0); output += 2;
-          vout${ABC[0:8]} = vext_s8(vout${ABC[0:8]}, vout${ABC[0:8]}, 2);
+        if (n & (2 * sizeof(${XINT8_T}))) {
+          vst1_lane_u16(__builtin_assume_aligned(output, 1), ${VREINTERPRET_U16_X8}(vout${ABC[0:8]}), 0); output += 2;
+          vout${ABC[0:8]} = ${VEXT_X8}(vout${ABC[0:8]}, vout${ABC[0:8]}, 2);
         }
-        if (n & (1 * sizeof(int8_t))) {
-          vst1_lane_s8(output, vout${ABC[0:8]}, 0);
+        if (n & (1 * sizeof(${XINT8_T}))) {
+          ${VST1_LANE_X8}(output, vout${ABC[0:8]}, 0);
         }
     }${" while (n != 0);" if BATCH_TILE > 8 else ""}
   }
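
For QU8, the zero-point subtraction widens with vsubl_u8, which zero-extends
both operands to 16 bits and subtracts modulo 2**16; since both inputs are in
[0, 255], the true difference lies in [-255, 255] and its two's-complement
bit pattern is exact when reinterpreted as int16x8_t. A small check of that
assumption (illustrative; compile for an ARM target):

  #include <arm_neon.h>
  #include <assert.h>

  int main(void) {
    const uint8x8_t va = vdup_n_u8(3);
    const uint8x8_t va_zero_point = vdup_n_u8(128);
    const int16x8_t vxa = vreinterpretq_s16_u16(vsubl_u8(va, va_zero_point));
    assert(vgetq_lane_s16(vxa, 0) == 3 - 128);  // -125
    return 0;
  }
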
diff --git a/src/qs8-vadd/scalar.c.in b/src/qs8-vadd/scalar.c.in
index 24da0cf..bbae859 100644
--- a/src/qs8-vadd/scalar.c.in
+++ b/src/qs8-vadd/scalar.c.in
@@ -3,6 +3,7 @@
 // This source code is licensed under the BSD-style license found in the
 // LICENSE file in the root directory of this source tree.
 
+$assert DATATYPE in ["QS8", "QU8"]
 $assert BATCH_TILE >= 1
 #include <assert.h>
 
@@ -10,12 +11,13 @@
 #include <xnnpack/vadd.h>
 
 
-void xnn_qs8_vadd_minmax_ukernel__scalar_x${BATCH_TILE}(
+$XINT8_T = {"QS8": "int8_t", "QU8": "uint8_t"}[DATATYPE]
+void xnn_${DATATYPE.lower()}_vadd_minmax_ukernel__scalar_x${BATCH_TILE}(
     size_t n,
-    const int8_t* input_a,
-    const int8_t* input_b,
-    int8_t* output,
-    const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+    const ${XINT8_T}* input_a,
+    const ${XINT8_T}* input_b,
+    ${XINT8_T}* output,
+    const union xnn_${DATATYPE.lower()}_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
 {
   const int32_t vbias = params->scalar.bias;
   const int32_t va_multiplier = params->scalar.a_multiplier;
@@ -35,12 +37,12 @@
       int32_t vout = asr_s32(vacc + vrounding, vshift);
       vout = math_max_s32(vout, voutput_min_less_zero_point);
       vout = math_min_s32(vout, voutput_max_less_zero_point);
-      *output++ = (int8_t) (vout + voutput_zero_point);
+      *output++ = (${XINT8_T}) (vout + voutput_zero_point);
 
-      n -= sizeof(int8_t);
+      n -= sizeof(${XINT8_T});
     } while (n != 0);
   $else:
-    for (; n >= ${BATCH_TILE} * sizeof(int8_t); n -= ${BATCH_TILE} * sizeof(int8_t)) {
+    for (; n >= ${BATCH_TILE} * sizeof(${XINT8_T}); n -= ${BATCH_TILE} * sizeof(${XINT8_T})) {
       $for N in range(BATCH_TILE):
         const int32_t va${N} = input_a[${N}];
       input_a += ${BATCH_TILE};
@@ -66,7 +68,7 @@
         vout${N} += voutput_zero_point;
 
       $for N in range(BATCH_TILE):
-        output[${N}] = vout${N};
+        output[${N}] = (${XINT8_T}) vout${N};
       output += ${BATCH_TILE};
     }
     if XNN_UNLIKELY(n != 0) {
@@ -78,7 +80,7 @@
         int32_t vout = asr_s32(vacc + vrounding, vshift);
         vout = math_max_s32(vout, voutput_min_less_zero_point);
         vout = math_min_s32(vout, voutput_max_less_zero_point);
-        *output++ = (int8_t) (vout + voutput_zero_point);
+        *output++ = (${XINT8_T}) (vout + voutput_zero_point);
       $else:
         do {
           const int32_t va = *input_a++;
@@ -88,9 +90,9 @@
           int32_t vout = asr_s32(vacc + vrounding, vshift);
           vout = math_max_s32(vout, voutput_min_less_zero_point);
           vout = math_min_s32(vout, voutput_max_less_zero_point);
-          *output++ = (int8_t) (vout + voutput_zero_point);
+          *output++ = (${XINT8_T}) (vout + voutput_zero_point);
 
-          n -= sizeof(int8_t);
+          n -= sizeof(${XINT8_T});
         } while (n != 0);
     }
 }
diff --git a/src/qs8-vadd/sse-mul16-ld64.c.in b/src/qs8-vadd/sse-mul16-ld64.c.in
index a30051f..0c00842 100644
--- a/src/qs8-vadd/sse-mul16-ld64.c.in
+++ b/src/qs8-vadd/sse-mul16-ld64.c.in
@@ -3,6 +3,7 @@
 // This source code is licensed under the BSD-style license found in the
 // LICENSE file in the root directory of this source tree.
 
+$assert DATATYPE in ["QS8", "QU8"]
 $assert SSE in [2, 4]
 $assert not AVX or SSE == 4
 $SSE_HEADER = {2: "emmintrin.h", 4: "smmintrin.h"}[SSE]
@@ -17,13 +18,18 @@
 
 
 $PARAMS_STRUCT = "sse4_mul16" if SSE == 4 else "sse2"
+$XINT8_T = {"QS8": "int8_t", "QU8": "uint8_t"}[DATATYPE]
+$_MM_CVTEPI8_EPI16 = {"QS8": "_mm_cvtepi8_epi16", "QU8": "_mm_cvtepu8_epi16"}[DATATYPE]
+$_MM_PACKXS_EPI16 = {"QS8": "_mm_packs_epi16", "QU8": "_mm_packus_epi16"}[DATATYPE]
+$_MM_MIN_EPX8 = {"QS8": "_mm_min_epi8", "QU8": "_mm_min_epu8"}[DATATYPE]
+$_MM_MAX_EPX8 = {"QS8": "_mm_max_epi8", "QU8": "_mm_max_epu8"}[DATATYPE]
 $ISA = "avx" if AVX else {2: "sse2", 4: "sse41"}[SSE]
-void xnn_qs8_vadd_minmax_ukernel__${ISA}_mul16_ld64_x${BATCH_TILE}(
+void xnn_${DATATYPE.lower()}_vadd_minmax_ukernel__${ISA}_mul16_ld64_x${BATCH_TILE}(
     size_t n,
-    const int8_t* input_a,
-    const int8_t* input_b,
-    int8_t* output,
-    const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+    const ${XINT8_T}* input_a,
+    const ${XINT8_T}* input_b,
+    ${XINT8_T}* output,
+    const union xnn_${DATATYPE.lower()}_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
 {
   const __m128i vbias = _mm_load_si128((const __m128i*) params->${PARAMS_STRUCT}.bias);
   const __m128i va_multiplier_lo = _mm_load_si128((const __m128i*) params->${PARAMS_STRUCT}.a_multiplier_lo);
@@ -36,13 +42,13 @@
   const __m128i voutput_min = _mm_load_si128((const __m128i*) params->${PARAMS_STRUCT}.output_min);
   const __m128i voutput_max = _mm_load_si128((const __m128i*) params->${PARAMS_STRUCT}.output_max);
 
-  for (; n >= ${BATCH_TILE} * sizeof(int8_t); n -= ${BATCH_TILE} * sizeof(int8_t)) {
+  for (; n >= ${BATCH_TILE} * sizeof(${XINT8_T}); n -= ${BATCH_TILE} * sizeof(${XINT8_T})) {
     $if SSE == 4:
-      const __m128i va${ABC[0:8]} = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
-      const __m128i vb${ABC[0:8]} = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_b));
+      const __m128i va${ABC[0:8]} = ${_MM_CVTEPI8_EPI16}(_mm_loadl_epi64((const __m128i*) input_a));
+      const __m128i vb${ABC[0:8]} = ${_MM_CVTEPI8_EPI16}(_mm_loadl_epi64((const __m128i*) input_b));
       $for N in range(8, BATCH_TILE, 8):
-        const __m128i va${ABC[N:N+8]} = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input_a + ${N})));
-        const __m128i vb${ABC[N:N+8]} = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input_b + ${N})));
+        const __m128i va${ABC[N:N+8]} = ${_MM_CVTEPI8_EPI16}(_mm_loadl_epi64((const __m128i*) (input_a + ${N})));
+        const __m128i vb${ABC[N:N+8]} = ${_MM_CVTEPI8_EPI16}(_mm_loadl_epi64((const __m128i*) (input_b + ${N})));
     $else:
       __m128i va${ABC[0:8]} = _mm_loadl_epi64((const __m128i*) input_a);
       __m128i vb${ABC[0:8]} = _mm_loadl_epi64((const __m128i*) input_b);
@@ -53,9 +59,15 @@
     input_b += ${BATCH_TILE};
 
     $if SSE < 4:
-      $for N in range(0, BATCH_TILE, 8):
-        va${ABC[N:N+8]} = _mm_srai_epi16(_mm_unpacklo_epi8(va${ABC[N:N+8]}, va${ABC[N:N+8]}), 8);
-        vb${ABC[N:N+8]} = _mm_srai_epi16(_mm_unpacklo_epi8(vb${ABC[N:N+8]}, vb${ABC[N:N+8]}), 8);
+      $if DATATYPE == "QU8":
+        const __m128i vzero = _mm_setzero_si128();
+        $for N in range(0, BATCH_TILE, 8):
+          va${ABC[N:N+8]} = _mm_unpacklo_epi8(va${ABC[N:N+8]}, vzero);
+          vb${ABC[N:N+8]} = _mm_unpacklo_epi8(vb${ABC[N:N+8]}, vzero);
+      $else:
+        $for N in range(0, BATCH_TILE, 8):
+          va${ABC[N:N+8]} = _mm_srai_epi16(_mm_unpacklo_epi8(va${ABC[N:N+8]}, va${ABC[N:N+8]}), 8);
+          vb${ABC[N:N+8]} = _mm_srai_epi16(_mm_unpacklo_epi8(vb${ABC[N:N+8]}, vb${ABC[N:N+8]}), 8);
 
     $for N in range(0, BATCH_TILE, 8):
       __m128i vaprod${ABC[N:N+8]}hi = _mm_mulhi_epu16(va${ABC[N:N+8]}, va_multiplier_lo);
@@ -67,9 +79,10 @@
       vaprod${ABC[N:N+8]}hi = _mm_add_epi16(vaprod${ABC[N:N+8]}hi, _mm_mullo_epi16(va${ABC[N:N+8]}, va_multiplier_hi));
       vbprod${ABC[N:N+8]}hi = _mm_add_epi16(vbprod${ABC[N:N+8]}hi, _mm_mullo_epi16(vb${ABC[N:N+8]}, vb_multiplier_hi));
 
-    $for N in range(0, BATCH_TILE, 8):
-      vaprod${ABC[N:N+8]}hi = _mm_sub_epi16(vaprod${ABC[N:N+8]}hi, _mm_and_si128(_mm_srai_epi16(va${ABC[N:N+8]}, 15), va_multiplier_lo));
-      vbprod${ABC[N:N+8]}hi = _mm_sub_epi16(vbprod${ABC[N:N+8]}hi, _mm_and_si128(_mm_srai_epi16(vb${ABC[N:N+8]}, 15), vb_multiplier_lo));
+    $if DATATYPE == "QS8":
+      $for N in range(0, BATCH_TILE, 8):
+        vaprod${ABC[N:N+8]}hi = _mm_sub_epi16(vaprod${ABC[N:N+8]}hi, _mm_and_si128(_mm_srai_epi16(va${ABC[N:N+8]}, 15), va_multiplier_lo));
+        vbprod${ABC[N:N+8]}hi = _mm_sub_epi16(vbprod${ABC[N:N+8]}hi, _mm_and_si128(_mm_srai_epi16(vb${ABC[N:N+8]}, 15), vb_multiplier_lo));
 
     $for N in range(0, BATCH_TILE, 8):
       __m128i vacc${ABC[N:N+4]} = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod${ABC[N:N+8]}lo, vaprod${ABC[N:N+8]}hi));
@@ -85,7 +98,7 @@
     $for N in range(0, BATCH_TILE, 8):
       __m128i vout${ABC[N:N+8]} = _mm_adds_epi16(_mm_packs_epi32(vacc${ABC[N:N+4]}, vacc${ABC[N+4:N+8]}), voutput_zero_point);
 
-    $if SSE < 4:
+    $if DATATYPE == "QS8" and SSE < 4:
       $for N in range(0, BATCH_TILE, 8):
         vout${ABC[N:N+8]} = _mm_max_epi16(vout${ABC[N:N+8]}, voutput_min);
 
@@ -94,22 +107,22 @@
 
     $for N in range(0, BATCH_TILE, 16):
       $if N + 8 < BATCH_TILE:
-        __m128i vout${ABC[N:N+16]} = _mm_packs_epi16(vout${ABC[N:N+8]}, vout${ABC[N+8:N+16]});
+        __m128i vout${ABC[N:N+16]} = ${_MM_PACKXS_EPI16}(vout${ABC[N:N+8]}, vout${ABC[N+8:N+16]});
       $else:
-        __m128i vout${ABC[N:N+8]}${ABC[N:N+8]} = _mm_packs_epi16(vout${ABC[N:N+8]}, vout${ABC[N:N+8]});
+        __m128i vout${ABC[N:N+8]}${ABC[N:N+8]} = ${_MM_PACKXS_EPI16}(vout${ABC[N:N+8]}, vout${ABC[N:N+8]});
 
-    $if SSE == 4:
+    $if DATATYPE == "QU8" or SSE == 4:
       $for N in range(0, BATCH_TILE, 16):
         $if N + 8 < BATCH_TILE:
-          vout${ABC[N:N+16]} = _mm_max_epi8(vout${ABC[N:N+16]}, voutput_min);
+          vout${ABC[N:N+16]} = ${_MM_MAX_EPX8}(vout${ABC[N:N+16]}, voutput_min);
         $else:
-          vout${ABC[N:N+8]}${ABC[N:N+8]} = _mm_max_epi8(vout${ABC[N:N+8]}${ABC[N:N+8]}, voutput_min);
+          vout${ABC[N:N+8]}${ABC[N:N+8]} = ${_MM_MAX_EPX8}(vout${ABC[N:N+8]}${ABC[N:N+8]}, voutput_min);
 
       $for N in range(0, BATCH_TILE, 16):
         $if N + 8 < BATCH_TILE:
-          vout${ABC[N:N+16]} = _mm_min_epi8(vout${ABC[N:N+16]}, voutput_max);
+          vout${ABC[N:N+16]} = ${_MM_MIN_EPX8}(vout${ABC[N:N+16]}, voutput_max);
         $else:
-          vout${ABC[N:N+8]}${ABC[N:N+8]} = _mm_min_epi8(vout${ABC[N:N+8]}${ABC[N:N+8]}, voutput_max);
+          vout${ABC[N:N+8]}${ABC[N:N+8]} = ${_MM_MIN_EPX8}(vout${ABC[N:N+8]}${ABC[N:N+8]}, voutput_max);
 
     $if BATCH_TILE >= 16:
       _mm_storeu_si128((__m128i*) output, vout${ABC[0:16]});
@@ -125,8 +138,8 @@
   if XNN_UNLIKELY(n != 0) {
     ${"do " if BATCH_TILE > 8 else ""}{
       $if SSE == 4:
-        const __m128i va${ABC[0:8]} = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
-        const __m128i vb${ABC[0:8]} = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_b));
+        const __m128i va${ABC[0:8]} = ${_MM_CVTEPI8_EPI16}(_mm_loadl_epi64((const __m128i*) input_a));
+        const __m128i vb${ABC[0:8]} = ${_MM_CVTEPI8_EPI16}(_mm_loadl_epi64((const __m128i*) input_b));
       $else:
         __m128i va${ABC[0:8]} = _mm_loadl_epi64((const __m128i*) input_a);
         __m128i vb${ABC[0:8]} = _mm_loadl_epi64((const __m128i*) input_b);
@@ -135,8 +148,13 @@
         input_b += 8;
 
       $if SSE < 4:
-        va${ABC[0:8]} = _mm_srai_epi16(_mm_unpacklo_epi8(va${ABC[0:8]}, va${ABC[0:8]}), 8);
-        vb${ABC[0:8]} = _mm_srai_epi16(_mm_unpacklo_epi8(vb${ABC[0:8]}, vb${ABC[0:8]}), 8);
+        $if DATATYPE == "QU8":
+          const __m128i vzero = _mm_setzero_si128();
+          va${ABC[0:8]} = _mm_unpacklo_epi8(va${ABC[0:8]}, vzero);
+          vb${ABC[0:8]} = _mm_unpacklo_epi8(vb${ABC[0:8]}, vzero);
+        $else:
+          va${ABC[0:8]} = _mm_srai_epi16(_mm_unpacklo_epi8(va${ABC[0:8]}, va${ABC[0:8]}), 8);
+          vb${ABC[0:8]} = _mm_srai_epi16(_mm_unpacklo_epi8(vb${ABC[0:8]}, vb${ABC[0:8]}), 8);
 
       __m128i vaprod${ABC[0:8]}hi = _mm_mulhi_epu16(va${ABC[0:8]}, va_multiplier_lo);
       __m128i vbprod${ABC[0:8]}hi = _mm_mulhi_epu16(vb${ABC[0:8]}, vb_multiplier_lo);
@@ -146,8 +164,9 @@
       vaprod${ABC[0:8]}hi = _mm_add_epi16(vaprod${ABC[0:8]}hi, _mm_mullo_epi16(va${ABC[0:8]}, va_multiplier_hi));
       vbprod${ABC[0:8]}hi = _mm_add_epi16(vbprod${ABC[0:8]}hi, _mm_mullo_epi16(vb${ABC[0:8]}, vb_multiplier_hi));
 
-      vaprod${ABC[0:8]}hi = _mm_sub_epi16(vaprod${ABC[0:8]}hi, _mm_and_si128(_mm_srai_epi16(va${ABC[0:8]}, 15), va_multiplier_lo));
-      vbprod${ABC[0:8]}hi = _mm_sub_epi16(vbprod${ABC[0:8]}hi, _mm_and_si128(_mm_srai_epi16(vb${ABC[0:8]}, 15), vb_multiplier_lo));
+      $if DATATYPE == "QS8":
+        vaprod${ABC[0:8]}hi = _mm_sub_epi16(vaprod${ABC[0:8]}hi, _mm_and_si128(_mm_srai_epi16(va${ABC[0:8]}, 15), va_multiplier_lo));
+        vbprod${ABC[0:8]}hi = _mm_sub_epi16(vbprod${ABC[0:8]}hi, _mm_and_si128(_mm_srai_epi16(vb${ABC[0:8]}, 15), vb_multiplier_lo));
 
       __m128i vacc${ABC[0:4]} = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod${ABC[0:8]}lo, vaprod${ABC[0:8]}hi));
       __m128i vacc${ABC[4:8]} = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod${ABC[0:8]}lo, vaprod${ABC[0:8]}hi));
@@ -159,55 +178,61 @@
       vacc${ABC[4:8]} = _mm_sra_epi32(_mm_add_epi32(vacc${ABC[4:8]}, vrounding), vshift);
 
       __m128i vout${ABC[0:8]} = _mm_adds_epi16(_mm_packs_epi32(vacc${ABC[0:4]}, vacc${ABC[4:8]}), voutput_zero_point);
-      $if SSE < 4:
+      $if DATATYPE == "QS8" and SSE < 4:
         vout${ABC[0:8]} = _mm_max_epi16(vout${ABC[0:8]}, voutput_min);
         vout${ABC[0:8]} = _mm_min_epi16(vout${ABC[0:8]}, voutput_max);
 
-      __m128i vout${ABC[0:8]}${ABC[0:8]} = _mm_packs_epi16(vout${ABC[0:8]}, vout${ABC[0:8]});
-      $if SSE == 4:
-        vout${ABC[0:8]}${ABC[0:8]} = _mm_max_epi8(vout${ABC[0:8]}${ABC[0:8]}, voutput_min);
-        vout${ABC[0:8]}${ABC[0:8]} = _mm_min_epi8(vout${ABC[0:8]}${ABC[0:8]}, voutput_max);
+      __m128i vout${ABC[0:8]}${ABC[0:8]} = ${_MM_PACKXS_EPI16}(vout${ABC[0:8]}, vout${ABC[0:8]});
+      $if DATATYPE == "QU8" or SSE == 4:
+        vout${ABC[0:8]}${ABC[0:8]} = ${_MM_MAX_EPX8}(vout${ABC[0:8]}${ABC[0:8]}, voutput_min);
+        vout${ABC[0:8]}${ABC[0:8]} = ${_MM_MIN_EPX8}(vout${ABC[0:8]}${ABC[0:8]}, voutput_max);
 
       $if BATCH_TILE > 8:
-        if XNN_LIKELY(n >= (8 * sizeof(int8_t))) {
+        if XNN_LIKELY(n >= (8 * sizeof(${XINT8_T}))) {
           _mm_storel_epi64((__m128i*) output, vout${ABC[0:8]}${ABC[0:8]});
           output += 8;
-          n -= 8 * sizeof(int8_t);
+          n -= 8 * sizeof(${XINT8_T});
         } else {
-          if (n & (4 * sizeof(int8_t))) {
+          if (n & (4 * sizeof(${XINT8_T}))) {
             *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout${ABC[0:8]}${ABC[0:8]});
             vout${ABC[0:8]}${ABC[0:8]} = _mm_srli_epi64(vout${ABC[0:8]}${ABC[0:8]}, 32);
             output += 4;
           }
-          if (n & (2 * sizeof(int8_t))) {
-            *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout${ABC[0:8]}${ABC[0:8]}, 0);
+          if (n & (2 * sizeof(${XINT8_T}))) {
+            $if SSE == 4:
+              *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout${ABC[0:8]}${ABC[0:8]}, 0);
+            $else:
+              *((uint16_t*) output) = (uint16_t) _mm_cvtsi128_si32(vout${ABC[0:8]}${ABC[0:8]});
             vout${ABC[0:8]}${ABC[0:8]} = _mm_srli_epi32(vout${ABC[0:8]}${ABC[0:8]}, 16);
             output += 2;
           }
-          if (n & (1 * sizeof(int8_t))) {
+          if (n & (1 * sizeof(${XINT8_T}))) {
             $if SSE == 4:
-              *output = (int8_t) _mm_extract_epi8(vout${ABC[0:8]}${ABC[0:8]}, 0);
+              *output = (${XINT8_T}) _mm_extract_epi8(vout${ABC[0:8]}${ABC[0:8]}, 0);
             $else:
               *output = (int32_t) _mm_cvtsi128_si32(vout${ABC[0:8]}${ABC[0:8]});
           }
           n = 0;
         }
       $else:
-        if (n & (4 * sizeof(int8_t))) {
+        if (n & (4 * sizeof(${XINT8_T}))) {
           *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout${ABC[0:8]}${ABC[0:8]});
           vout${ABC[0:8]}${ABC[0:8]} = _mm_srli_epi64(vout${ABC[0:8]}${ABC[0:8]}, 32);
           output += 4;
         }
-        if (n & (2 * sizeof(int8_t))) {
-          *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout${ABC[0:8]}${ABC[0:8]}, 0);
+        if (n & (2 * sizeof(${XINT8_T}))) {
+          $if SSE == 4:
+            *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout${ABC[0:8]}${ABC[0:8]}, 0);
+          $else:
+            *((uint16_t*) output) = (uint16_t) _mm_cvtsi128_si32(vout${ABC[0:8]}${ABC[0:8]});
           vout${ABC[0:8]}${ABC[0:8]} = _mm_srli_epi32(vout${ABC[0:8]}${ABC[0:8]}, 16);
           output += 2;
         }
-        if (n & (1 * sizeof(int8_t))) {
+        if (n & (1 * sizeof(${XINT8_T}))) {
           $if SSE == 4:
-            *output = (int8_t) _mm_extract_epi8(vout${ABC[0:8]}${ABC[0:8]}, 0);
+            *output = (${XINT8_T}) _mm_extract_epi8(vout${ABC[0:8]}${ABC[0:8]}, 0);
           $else:
-            *output = (int32_t) _mm_cvtsi128_si32(vout${ABC[0:8]}${ABC[0:8]});
+            *output = (${XINT8_T}) _mm_cvtsi128_si32(vout${ABC[0:8]}${ABC[0:8]});
         }
     }${" while (n != 0);" if BATCH_TILE > 8 else ""}
   }
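
The "mul16" scheme assembles the 32-bit product of a 16-bit lane and a 21-bit
multiplier from _mm_mulhi_epu16/_mm_mullo_epi16 halves. Because mulhi_epu16
treats a negative QS8 lane x as x + 2**16, the high half must be corrected by
subtracting multiplier_lo; QU8 lanes are zero-extended and non-negative, so
the template drops that fix-up. A scalar model of one lane (hedged sketch;
assumes two's-complement int32_t conversion, as on all supported targets):

  #include <assert.h>
  #include <stdint.h>

  static int32_t mul16_model(int16_t x, uint32_t m, int is_signed) {
    const uint16_t m_lo = (uint16_t) m;
    const uint16_t m_hi = (uint16_t) (m >> 16);
    const uint16_t ux = (uint16_t) x;  // raw lane bits
    uint16_t hi = (uint16_t) (((uint32_t) ux * m_lo) >> 16);   // _mm_mulhi_epu16
    const uint16_t lo = (uint16_t) ((uint32_t) ux * m_lo);     // _mm_mullo_epi16
    hi = (uint16_t) (hi + (uint16_t) ((uint32_t) ux * m_hi));  // + x * m_hi
    if (is_signed && x < 0) hi = (uint16_t) (hi - m_lo);       // QS8 sign fix-up
    return (int32_t) (((uint32_t) hi << 16) | lo);             // unpack lo/hi
  }

  int main(void) {
    assert(mul16_model(-123, 1048577u, 1) == -123 * INT32_C(1048577));
    assert(mul16_model(250, 524288u, 0) == 250 * INT32_C(524288));
    return 0;
  }
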
diff --git a/src/qs8-vadd/sse-mul32-ld32.c.in b/src/qs8-vadd/sse-mul32-ld32.c.in
index 9acb7df..64a2ed9 100644
--- a/src/qs8-vadd/sse-mul32-ld32.c.in
+++ b/src/qs8-vadd/sse-mul32-ld32.c.in
@@ -3,6 +3,7 @@
 // This source code is licensed under the BSD-style license found in the
 // LICENSE file in the root directory of this source tree.
 
+$assert DATATYPE in ["QS8", "QU8"]
 $assert SSE == 4
 $assert not XOP or AVX
 $assert BATCH_TILE % 8 == 0
diff --git a/src/qs8-vadd/wasmsimd.c.in b/src/qs8-vadd/wasmsimd.c.in
index 5ec76f4..3776c59 100644
--- a/src/qs8-vadd/wasmsimd.c.in
+++ b/src/qs8-vadd/wasmsimd.c.in
@@ -3,6 +3,7 @@
 // This source code is licensed under the BSD-style license found in the
 // LICENSE file in the root directory of this source tree.
 
+$assert DATATYPE in ["QS8", "QU8"]
 $assert BATCH_TILE % 8 == 0
 $assert BATCH_TILE >= 8
 $ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
@@ -13,12 +14,19 @@
 #include <xnnpack/vadd.h>
 
 
-void xnn_qs8_vadd_minmax_ukernel__wasmsimd_x${BATCH_TILE}(
+$XINT8_T = {"QS8": "int8_t", "QU8": "uint8_t"}[DATATYPE]
+$WASM_X16X8_LOAD8X8 = {"QS8": "wasm_i16x8_load8x8", "QU8": "wasm_u16x8_load8x8"}[DATATYPE]
+$WASM_X32X4_EXTEND_LOW_X16X8 = {"QS8": "wasm_i32x4_extend_low_i16x8", "QU8": "wasm_u32x4_extend_low_u16x8"}[DATATYPE]
+$WASM_X32X4_EXTEND_HIGH_X16X8 = {"QS8": "wasm_i32x4_extend_high_i16x8", "QU8": "wasm_u32x4_extend_high_u16x8"}[DATATYPE]
+$WASM_X8X16_NARROW_I16X8 = {"QS8": "wasm_i8x16_narrow_i16x8", "QU8": "wasm_u8x16_narrow_i16x8"}[DATATYPE]
+$WASM_X8X16_MIN = {"QS8": "wasm_i8x16_min", "QU8": "wasm_u8x16_min"}[DATATYPE]
+$WASM_X8X16_MAX = {"QS8": "wasm_i8x16_max", "QU8": "wasm_u8x16_max"}[DATATYPE]
+void xnn_${DATATYPE.lower()}_vadd_minmax_ukernel__wasmsimd_x${BATCH_TILE}(
     size_t n,
-    const int8_t* input_a,
-    const int8_t* input_b,
-    int8_t* output,
-    const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+    const ${XINT8_T}* input_a,
+    const ${XINT8_T}* input_b,
+    ${XINT8_T}* output,
+    const union xnn_${DATATYPE.lower()}_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
 {
   const v128_t vbias = wasm_v128_load(params->wasmsimd.bias);
   const v128_t va_multiplier = wasm_v128_load(params->wasmsimd.a_multiplier);
@@ -29,22 +37,22 @@
   const v128_t voutput_min = wasm_v128_load(params->wasmsimd.output_min);
   const v128_t voutput_max = wasm_v128_load(params->wasmsimd.output_max);
 
-  for (; n >= ${BATCH_TILE} * sizeof(int8_t); n -= ${BATCH_TILE} * sizeof(int8_t)) {
-    const v128_t va${ABC[0:8]} = wasm_i16x8_load8x8(input_a);
-    const v128_t vb${ABC[0:8]} = wasm_i16x8_load8x8(input_b);
+  for (; n >= ${BATCH_TILE} * sizeof(${XINT8_T}); n -= ${BATCH_TILE} * sizeof(${XINT8_T})) {
+    const v128_t va${ABC[0:8]} = ${WASM_X16X8_LOAD8X8}(input_a);
+    const v128_t vb${ABC[0:8]} = ${WASM_X16X8_LOAD8X8}(input_b);
     $for N in range(8, BATCH_TILE, 8):
-      const v128_t va${ABC[N:N+8]} = wasm_i16x8_load8x8(input_a + ${N});
-      const v128_t vb${ABC[N:N+8]} = wasm_i16x8_load8x8(input_b + ${N});
+      const v128_t va${ABC[N:N+8]} = ${WASM_X16X8_LOAD8X8}(input_a + ${N});
+      const v128_t vb${ABC[N:N+8]} = ${WASM_X16X8_LOAD8X8}(input_b + ${N});
     input_a += ${BATCH_TILE};
     input_b += ${BATCH_TILE};
 
     $for N in range(0, BATCH_TILE, 8):
-      v128_t vacc${ABC[N:N+4]} = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(va${ABC[N:N+8]}), va_multiplier));
-      v128_t vacc${ABC[N+4:N+8]} = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(va${ABC[N:N+8]}), va_multiplier));
+      v128_t vacc${ABC[N:N+4]} = wasm_i32x4_add(vbias, wasm_i32x4_mul(${WASM_X32X4_EXTEND_LOW_X16X8}(va${ABC[N:N+8]}), va_multiplier));
+      v128_t vacc${ABC[N+4:N+8]} = wasm_i32x4_add(vbias, wasm_i32x4_mul(${WASM_X32X4_EXTEND_HIGH_X16X8}(va${ABC[N:N+8]}), va_multiplier));
 
     $for N in range(0, BATCH_TILE, 8):
-      vacc${ABC[N:N+4]} = wasm_i32x4_add(vacc${ABC[N:N+4]}, wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(vb${ABC[N:N+8]}), vb_multiplier));
-      vacc${ABC[N+4:N+8]} = wasm_i32x4_add(vacc${ABC[N+4:N+8]}, wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(vb${ABC[N:N+8]}), vb_multiplier));
+      vacc${ABC[N:N+4]} = wasm_i32x4_add(vacc${ABC[N:N+4]}, wasm_i32x4_mul(${WASM_X32X4_EXTEND_LOW_X16X8}(vb${ABC[N:N+8]}), vb_multiplier));
+      vacc${ABC[N+4:N+8]} = wasm_i32x4_add(vacc${ABC[N+4:N+8]}, wasm_i32x4_mul(${WASM_X32X4_EXTEND_HIGH_X16X8}(vb${ABC[N:N+8]}), vb_multiplier));
 
     $for N in range(0, BATCH_TILE, 4):
       vacc${ABC[N:N+4]} = wasm_i32x4_shr(wasm_i32x4_add(vacc${ABC[N:N+4]}, vrounding), vshift);
@@ -54,21 +62,21 @@
 
     $for N in range(0, BATCH_TILE, 16):
       $if N + 8 < BATCH_TILE:
-        v128_t vout${ABC[N:N+16]} = wasm_i8x16_narrow_i16x8(vout${ABC[N:N+8]}, vout${ABC[N+8:N+16]});
+        v128_t vout${ABC[N:N+16]} = ${WASM_X8X16_NARROW_I16X8}(vout${ABC[N:N+8]}, vout${ABC[N+8:N+16]});
       $else:
-        v128_t vout${ABC[N:N+8]}${ABC[N:N+8]} = wasm_i8x16_narrow_i16x8(vout${ABC[N:N+8]}, vout${ABC[N:N+8]});
+        v128_t vout${ABC[N:N+8]}${ABC[N:N+8]} = ${WASM_X8X16_NARROW_I16X8}(vout${ABC[N:N+8]}, vout${ABC[N:N+8]});
 
     $for N in range(0, BATCH_TILE, 16):
       $if N + 8 < BATCH_TILE:
-        vout${ABC[N:N+16]} = wasm_i8x16_max(vout${ABC[N:N+16]}, voutput_min);
+        vout${ABC[N:N+16]} = ${WASM_X8X16_MAX}(vout${ABC[N:N+16]}, voutput_min);
       $else:
-        vout${ABC[N:N+8]}${ABC[N:N+8]} = wasm_i8x16_max(vout${ABC[N:N+8]}${ABC[N:N+8]}, voutput_min);
+        vout${ABC[N:N+8]}${ABC[N:N+8]} = ${WASM_X8X16_MAX}(vout${ABC[N:N+8]}${ABC[N:N+8]}, voutput_min);
 
     $for N in range(0, BATCH_TILE, 16):
       $if N + 8 < BATCH_TILE:
-        vout${ABC[N:N+16]} = wasm_i8x16_min(vout${ABC[N:N+16]}, voutput_max);
+        vout${ABC[N:N+16]} = ${WASM_X8X16_MIN}(vout${ABC[N:N+16]}, voutput_max);
       $else:
-        vout${ABC[N:N+8]}${ABC[N:N+8]} = wasm_i8x16_min(vout${ABC[N:N+8]}${ABC[N:N+8]}, voutput_max);
+        vout${ABC[N:N+8]}${ABC[N:N+8]} = ${WASM_X8X16_MIN}(vout${ABC[N:N+8]}${ABC[N:N+8]}, voutput_max);
 
     $if BATCH_TILE >= 16:
       wasm_v128_store(output, vout${ABC[0:16]});
@@ -83,61 +91,61 @@
   }
   if XNN_UNLIKELY(n != 0) {
     ${"do " if BATCH_TILE > 8 else ""}{
-      const v128_t va${ABC[0:8]} = wasm_i16x8_load8x8(input_a);
-      const v128_t vb${ABC[0:8]} = wasm_i16x8_load8x8(input_b);
+      const v128_t va${ABC[0:8]} = ${WASM_X16X8_LOAD8X8}(input_a);
+      const v128_t vb${ABC[0:8]} = ${WASM_X16X8_LOAD8X8}(input_b);
       $if BATCH_TILE > 8:
         input_a += 8;
         input_b += 8;
 
-      v128_t vacc${ABC[0:4]} = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(va${ABC[0:8]}), va_multiplier));
-      v128_t vacc${ABC[4:8]} = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(va${ABC[0:8]}), va_multiplier));
+      v128_t vacc${ABC[0:4]} = wasm_i32x4_add(vbias, wasm_i32x4_mul(${WASM_X32X4_EXTEND_LOW_X16X8}(va${ABC[0:8]}), va_multiplier));
+      v128_t vacc${ABC[4:8]} = wasm_i32x4_add(vbias, wasm_i32x4_mul(${WASM_X32X4_EXTEND_HIGH_X16X8}(va${ABC[0:8]}), va_multiplier));
 
-      vacc${ABC[0:4]} = wasm_i32x4_add(vacc${ABC[0:4]}, wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(vb${ABC[0:8]}), vb_multiplier));
-      vacc${ABC[4:8]} = wasm_i32x4_add(vacc${ABC[4:8]}, wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(vb${ABC[0:8]}), vb_multiplier));
+      vacc${ABC[0:4]} = wasm_i32x4_add(vacc${ABC[0:4]}, wasm_i32x4_mul(${WASM_X32X4_EXTEND_LOW_X16X8}(vb${ABC[0:8]}), vb_multiplier));
+      vacc${ABC[4:8]} = wasm_i32x4_add(vacc${ABC[4:8]}, wasm_i32x4_mul(${WASM_X32X4_EXTEND_HIGH_X16X8}(vb${ABC[0:8]}), vb_multiplier));
 
       vacc${ABC[0:4]} = wasm_i32x4_shr(wasm_i32x4_add(vacc${ABC[0:4]}, vrounding), vshift);
       vacc${ABC[4:8]} = wasm_i32x4_shr(wasm_i32x4_add(vacc${ABC[4:8]}, vrounding), vshift);
 
       v128_t vout${ABC[0:8]} = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vacc${ABC[0:4]}, vacc${ABC[4:8]}), voutput_zero_point);
 
-      v128_t vout${ABC[0:8]}${ABC[0:8]} = wasm_i8x16_narrow_i16x8(vout${ABC[0:8]}, vout${ABC[0:8]});
-      vout${ABC[0:8]}${ABC[0:8]} = wasm_i8x16_max(vout${ABC[0:8]}${ABC[0:8]}, voutput_min);
-      vout${ABC[0:8]}${ABC[0:8]} = wasm_i8x16_min(vout${ABC[0:8]}${ABC[0:8]}, voutput_max);
+      v128_t vout${ABC[0:8]}${ABC[0:8]} = ${WASM_X8X16_NARROW_I16X8}(vout${ABC[0:8]}, vout${ABC[0:8]});
+      vout${ABC[0:8]}${ABC[0:8]} = ${WASM_X8X16_MAX}(vout${ABC[0:8]}${ABC[0:8]}, voutput_min);
+      vout${ABC[0:8]}${ABC[0:8]} = ${WASM_X8X16_MIN}(vout${ABC[0:8]}${ABC[0:8]}, voutput_max);
 
       $if BATCH_TILE > 8:
-        if XNN_LIKELY(n >= (8 * sizeof(int8_t))) {
+        if XNN_LIKELY(n >= (8 * sizeof(${XINT8_T}))) {
           *((double*) output) = wasm_f64x2_extract_lane(vout${ABC[0:8]}${ABC[0:8]}, 0);
           output += 8;
-          n -= 8 * sizeof(int8_t);
+          n -= 8 * sizeof(${XINT8_T});
         } else {
-          if (n & (4 * sizeof(int8_t))) {
+          if (n & (4 * sizeof(${XINT8_T}))) {
             *((uint32_t*) output) = (uint32_t) wasm_i32x4_extract_lane(vout${ABC[0:8]}${ABC[0:8]}, 0);
             vout${ABC[0:8]}${ABC[0:8]} = wasm_u64x2_shr(vout${ABC[0:8]}${ABC[0:8]}, 32);
             output += 4;
           }
-          if (n & (2 * sizeof(int8_t))) {
+          if (n & (2 * sizeof(${XINT8_T}))) {
             *((uint16_t*) output) = (uint16_t) wasm_i16x8_extract_lane(vout${ABC[0:8]}${ABC[0:8]}, 0);
             vout${ABC[0:8]}${ABC[0:8]} = wasm_u32x4_shr(vout${ABC[0:8]}${ABC[0:8]}, 16);
             output += 2;
           }
-          if (n & (1 * sizeof(int8_t))) {
-            *output = wasm_i8x16_extract_lane(vout${ABC[0:8]}${ABC[0:8]}, 0);
+          if (n & (1 * sizeof(${XINT8_T}))) {
+            *output = (${XINT8_T}) wasm_i8x16_extract_lane(vout${ABC[0:8]}${ABC[0:8]}, 0);
           }
           n = 0;
         }
       $else:
-        if (n & (4 * sizeof(int8_t))) {
+        if (n & (4 * sizeof(${XINT8_T}))) {
           *((uint32_t*) output) = (uint32_t) wasm_i32x4_extract_lane(vout${ABC[0:8]}${ABC[0:8]}, 0);
           vout${ABC[0:8]}${ABC[0:8]} = wasm_u64x2_shr(vout${ABC[0:8]}${ABC[0:8]}, 32);
           output += 4;
         }
-        if (n & (2 * sizeof(int8_t))) {
+        if (n & (2 * sizeof(${XINT8_T}))) {
           *((uint16_t*) output) = (uint16_t) wasm_i16x8_extract_lane(vout${ABC[0:8]}${ABC[0:8]}, 0);
           vout${ABC[0:8]}${ABC[0:8]} = wasm_u32x4_shr(vout${ABC[0:8]}${ABC[0:8]}, 16);
           output += 2;
         }
-        if (n & (1 * sizeof(int8_t))) {
-          *output = wasm_i8x16_extract_lane(vout${ABC[0:8]}${ABC[0:8]}, 0);
+        if (n & (1 * sizeof(${XINT8_T}))) {
+          *output = (${XINT8_T}) wasm_i8x16_extract_lane(vout${ABC[0:8]}${ABC[0:8]}, 0);
         }
     }${" while (n != 0);" if BATCH_TILE > 8 else ""}
   }
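
Note on the tail handling generalized above: once fewer than 8 bytes remain, the kernel stores the low lanes of the 8-byte result piecewise (4, then 2, then 1 byte), shifting the vector down between stores so lane 0 always holds the next bytes. A scalar sketch of that pattern, with memcpy standing in for the unaligned lane stores (illustrative only, not part of this change):

  #include <stddef.h>
  #include <stdint.h>
  #include <string.h>

  /* Scalar model of the 4/2/1-byte tail store: each branch mirrors one
     lane extract plus the vector shift that exposes the next lanes. */
  static void store_tail(uint8_t* output, const uint8_t vout[8], size_t n) {
    size_t i = 0;
    if (n & 4) { memcpy(output, vout + i, 4); output += 4; i += 4; }
    if (n & 2) { memcpy(output, vout + i, 2); output += 2; i += 2; }
    if (n & 1) { *output = vout[i]; }
  }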
diff --git a/src/qs8-vaddc/avx2-mul32-ld64.c.in b/src/qs8-vaddc/avx2-mul32-ld64.c.in
index 736a619..0992813 100644
--- a/src/qs8-vaddc/avx2-mul32-ld64.c.in
+++ b/src/qs8-vaddc/avx2-mul32-ld64.c.in
@@ -3,6 +3,7 @@
 // This source code is licensed under the BSD-style license found in the
 // LICENSE file in the root directory of this source tree.
 
+$assert DATATYPE in ["QS8", "QU8"]
 $assert BATCH_TILE % 8 == 0
 $assert BATCH_TILE >= 8
 $ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
diff --git a/src/qs8-vaddc/gen/minmax-neon-ld64-x16.c b/src/qs8-vaddc/gen/minmax-neon-ld64-x16.c
index 1ae6010..a86d2d5 100644
--- a/src/qs8-vaddc/gen/minmax-neon-ld64-x16.c
+++ b/src/qs8-vaddc/gen/minmax-neon-ld64-x16.c
@@ -13,7 +13,6 @@
 
 #include <xnnpack/vadd.h>
 
-#include <stdio.h>
 #include <inttypes.h>
 
 
@@ -33,7 +32,7 @@
 
   const int32_t vxb = (int32_t) *input_b - (int32_t) params->neon.b_zero_point;
   const int32_t vb = params->neon.b_multiplier;
-  const int32x4_t vb_bias = vdupq_n_s32(vxb * vb);
+  const int32x4_t vbias = vdupq_n_s32(vxb * vb);
 
   for (; n >= 16 * sizeof(int8_t); n -= 16 * sizeof(int8_t)) {
     const int8x8_t va01234567 = vld1_s8(input_a); input_a += 8;
@@ -42,10 +41,10 @@
     const int16x8_t vxa01234567 = vsubl_s8(va01234567, va_zero_point);
     const int16x8_t vxa89ABCDEF = vsubl_s8(va89ABCDEF, va_zero_point);
 
-    int32x4_t vacc0123 = vmlaq_s32(vb_bias, vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
-    int32x4_t vacc4567 = vmlaq_s32(vb_bias, vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
-    int32x4_t vacc89AB = vmlaq_s32(vb_bias, vmovl_s16(vget_low_s16(vxa89ABCDEF)), va_multiplier);
-    int32x4_t vaccCDEF = vmlaq_s32(vb_bias, vmovl_s16(vget_high_s16(vxa89ABCDEF)), va_multiplier);
+    int32x4_t vacc0123 = vmlaq_s32(vbias, vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
+    int32x4_t vacc4567 = vmlaq_s32(vbias, vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
+    int32x4_t vacc89AB = vmlaq_s32(vbias, vmovl_s16(vget_low_s16(vxa89ABCDEF)), va_multiplier);
+    int32x4_t vaccCDEF = vmlaq_s32(vbias, vmovl_s16(vget_high_s16(vxa89ABCDEF)), va_multiplier);
 
     vacc0123 = vrshlq_s32(vacc0123, vright_shift);
     vacc4567 = vrshlq_s32(vacc4567, vright_shift);
@@ -69,8 +68,8 @@
 
       const int16x8_t vxa01234567 = vsubl_s8(va01234567, va_zero_point);
 
-      int32x4_t vacc0123 = vmlaq_s32(vb_bias, vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
-      int32x4_t vacc4567 = vmlaq_s32(vb_bias, vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
+      int32x4_t vacc0123 = vmlaq_s32(vbias, vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
+      int32x4_t vacc4567 = vmlaq_s32(vbias, vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
 
       vacc0123 = vrshlq_s32(vacc0123, vright_shift);
       vacc4567 = vrshlq_s32(vacc4567, vright_shift);
diff --git a/src/qs8-vaddc/gen/minmax-neon-ld64-x24.c b/src/qs8-vaddc/gen/minmax-neon-ld64-x24.c
index a4dc5d2..e76e138 100644
--- a/src/qs8-vaddc/gen/minmax-neon-ld64-x24.c
+++ b/src/qs8-vaddc/gen/minmax-neon-ld64-x24.c
@@ -13,7 +13,6 @@
 
 #include <xnnpack/vadd.h>
 
-#include <stdio.h>
 #include <inttypes.h>
 
 
@@ -33,7 +32,7 @@
 
   const int32_t vxb = (int32_t) *input_b - (int32_t) params->neon.b_zero_point;
   const int32_t vb = params->neon.b_multiplier;
-  const int32x4_t vb_bias = vdupq_n_s32(vxb * vb);
+  const int32x4_t vbias = vdupq_n_s32(vxb * vb);
 
   for (; n >= 24 * sizeof(int8_t); n -= 24 * sizeof(int8_t)) {
     const int8x8_t va01234567 = vld1_s8(input_a); input_a += 8;
@@ -44,12 +43,12 @@
     const int16x8_t vxa89ABCDEF = vsubl_s8(va89ABCDEF, va_zero_point);
     const int16x8_t vxaGHIJKLMN = vsubl_s8(vaGHIJKLMN, va_zero_point);
 
-    int32x4_t vacc0123 = vmlaq_s32(vb_bias, vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
-    int32x4_t vacc4567 = vmlaq_s32(vb_bias, vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
-    int32x4_t vacc89AB = vmlaq_s32(vb_bias, vmovl_s16(vget_low_s16(vxa89ABCDEF)), va_multiplier);
-    int32x4_t vaccCDEF = vmlaq_s32(vb_bias, vmovl_s16(vget_high_s16(vxa89ABCDEF)), va_multiplier);
-    int32x4_t vaccGHIJ = vmlaq_s32(vb_bias, vmovl_s16(vget_low_s16(vxaGHIJKLMN)), va_multiplier);
-    int32x4_t vaccKLMN = vmlaq_s32(vb_bias, vmovl_s16(vget_high_s16(vxaGHIJKLMN)), va_multiplier);
+    int32x4_t vacc0123 = vmlaq_s32(vbias, vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
+    int32x4_t vacc4567 = vmlaq_s32(vbias, vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
+    int32x4_t vacc89AB = vmlaq_s32(vbias, vmovl_s16(vget_low_s16(vxa89ABCDEF)), va_multiplier);
+    int32x4_t vaccCDEF = vmlaq_s32(vbias, vmovl_s16(vget_high_s16(vxa89ABCDEF)), va_multiplier);
+    int32x4_t vaccGHIJ = vmlaq_s32(vbias, vmovl_s16(vget_low_s16(vxaGHIJKLMN)), va_multiplier);
+    int32x4_t vaccKLMN = vmlaq_s32(vbias, vmovl_s16(vget_high_s16(vxaGHIJKLMN)), va_multiplier);
 
     vacc0123 = vrshlq_s32(vacc0123, vright_shift);
     vacc4567 = vrshlq_s32(vacc4567, vright_shift);
@@ -80,8 +79,8 @@
 
       const int16x8_t vxa01234567 = vsubl_s8(va01234567, va_zero_point);
 
-      int32x4_t vacc0123 = vmlaq_s32(vb_bias, vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
-      int32x4_t vacc4567 = vmlaq_s32(vb_bias, vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
+      int32x4_t vacc0123 = vmlaq_s32(vbias, vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
+      int32x4_t vacc4567 = vmlaq_s32(vbias, vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
 
       vacc0123 = vrshlq_s32(vacc0123, vright_shift);
       vacc4567 = vrshlq_s32(vacc4567, vright_shift);
diff --git a/src/qs8-vaddc/gen/minmax-neon-ld64-x32.c b/src/qs8-vaddc/gen/minmax-neon-ld64-x32.c
index c2fa0ad..6fdaa55 100644
--- a/src/qs8-vaddc/gen/minmax-neon-ld64-x32.c
+++ b/src/qs8-vaddc/gen/minmax-neon-ld64-x32.c
@@ -13,7 +13,6 @@
 
 #include <xnnpack/vadd.h>
 
-#include <stdio.h>
 #include <inttypes.h>
 
 
@@ -33,7 +32,7 @@
 
   const int32_t vxb = (int32_t) *input_b - (int32_t) params->neon.b_zero_point;
   const int32_t vb = params->neon.b_multiplier;
-  const int32x4_t vb_bias = vdupq_n_s32(vxb * vb);
+  const int32x4_t vbias = vdupq_n_s32(vxb * vb);
 
   for (; n >= 32 * sizeof(int8_t); n -= 32 * sizeof(int8_t)) {
     const int8x8_t va01234567 = vld1_s8(input_a); input_a += 8;
@@ -46,14 +45,14 @@
     const int16x8_t vxaGHIJKLMN = vsubl_s8(vaGHIJKLMN, va_zero_point);
     const int16x8_t vxaOPQRSTUV = vsubl_s8(vaOPQRSTUV, va_zero_point);
 
-    int32x4_t vacc0123 = vmlaq_s32(vb_bias, vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
-    int32x4_t vacc4567 = vmlaq_s32(vb_bias, vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
-    int32x4_t vacc89AB = vmlaq_s32(vb_bias, vmovl_s16(vget_low_s16(vxa89ABCDEF)), va_multiplier);
-    int32x4_t vaccCDEF = vmlaq_s32(vb_bias, vmovl_s16(vget_high_s16(vxa89ABCDEF)), va_multiplier);
-    int32x4_t vaccGHIJ = vmlaq_s32(vb_bias, vmovl_s16(vget_low_s16(vxaGHIJKLMN)), va_multiplier);
-    int32x4_t vaccKLMN = vmlaq_s32(vb_bias, vmovl_s16(vget_high_s16(vxaGHIJKLMN)), va_multiplier);
-    int32x4_t vaccOPQR = vmlaq_s32(vb_bias, vmovl_s16(vget_low_s16(vxaOPQRSTUV)), va_multiplier);
-    int32x4_t vaccSTUV = vmlaq_s32(vb_bias, vmovl_s16(vget_high_s16(vxaOPQRSTUV)), va_multiplier);
+    int32x4_t vacc0123 = vmlaq_s32(vbias, vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
+    int32x4_t vacc4567 = vmlaq_s32(vbias, vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
+    int32x4_t vacc89AB = vmlaq_s32(vbias, vmovl_s16(vget_low_s16(vxa89ABCDEF)), va_multiplier);
+    int32x4_t vaccCDEF = vmlaq_s32(vbias, vmovl_s16(vget_high_s16(vxa89ABCDEF)), va_multiplier);
+    int32x4_t vaccGHIJ = vmlaq_s32(vbias, vmovl_s16(vget_low_s16(vxaGHIJKLMN)), va_multiplier);
+    int32x4_t vaccKLMN = vmlaq_s32(vbias, vmovl_s16(vget_high_s16(vxaGHIJKLMN)), va_multiplier);
+    int32x4_t vaccOPQR = vmlaq_s32(vbias, vmovl_s16(vget_low_s16(vxaOPQRSTUV)), va_multiplier);
+    int32x4_t vaccSTUV = vmlaq_s32(vbias, vmovl_s16(vget_high_s16(vxaOPQRSTUV)), va_multiplier);
 
     vacc0123 = vrshlq_s32(vacc0123, vright_shift);
     vacc4567 = vrshlq_s32(vacc4567, vright_shift);
@@ -87,8 +86,8 @@
 
       const int16x8_t vxa01234567 = vsubl_s8(va01234567, va_zero_point);
 
-      int32x4_t vacc0123 = vmlaq_s32(vb_bias, vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
-      int32x4_t vacc4567 = vmlaq_s32(vb_bias, vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
+      int32x4_t vacc0123 = vmlaq_s32(vbias, vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
+      int32x4_t vacc4567 = vmlaq_s32(vbias, vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
 
       vacc0123 = vrshlq_s32(vacc0123, vright_shift);
       vacc4567 = vrshlq_s32(vacc4567, vright_shift);
diff --git a/src/qs8-vaddc/gen/minmax-neon-ld64-x8.c b/src/qs8-vaddc/gen/minmax-neon-ld64-x8.c
index 68f42c3..43f45e6 100644
--- a/src/qs8-vaddc/gen/minmax-neon-ld64-x8.c
+++ b/src/qs8-vaddc/gen/minmax-neon-ld64-x8.c
@@ -13,7 +13,6 @@
 
 #include <xnnpack/vadd.h>
 
-#include <stdio.h>
 #include <inttypes.h>
 
 
@@ -33,15 +32,15 @@
 
   const int32_t vxb = (int32_t) *input_b - (int32_t) params->neon.b_zero_point;
   const int32_t vb = params->neon.b_multiplier;
-  const int32x4_t vb_bias = vdupq_n_s32(vxb * vb);
+  const int32x4_t vbias = vdupq_n_s32(vxb * vb);
 
   for (; n >= 8 * sizeof(int8_t); n -= 8 * sizeof(int8_t)) {
     const int8x8_t va01234567 = vld1_s8(input_a); input_a += 8;
 
     const int16x8_t vxa01234567 = vsubl_s8(va01234567, va_zero_point);
 
-    int32x4_t vacc0123 = vmlaq_s32(vb_bias, vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
-    int32x4_t vacc4567 = vmlaq_s32(vb_bias, vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
+    int32x4_t vacc0123 = vmlaq_s32(vbias, vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
+    int32x4_t vacc4567 = vmlaq_s32(vbias, vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
 
     vacc0123 = vrshlq_s32(vacc0123, vright_shift);
     vacc4567 = vrshlq_s32(vacc4567, vright_shift);
@@ -62,8 +61,8 @@
 
       const int16x8_t vxa01234567 = vsubl_s8(va01234567, va_zero_point);
 
-      int32x4_t vacc0123 = vmlaq_s32(vb_bias, vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
-      int32x4_t vacc4567 = vmlaq_s32(vb_bias, vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
+      int32x4_t vacc0123 = vmlaq_s32(vbias, vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
+      int32x4_t vacc4567 = vmlaq_s32(vbias, vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
 
       vacc0123 = vrshlq_s32(vacc0123, vright_shift);
       vacc4567 = vrshlq_s32(vacc4567, vright_shift);
diff --git a/src/qs8-vaddc/gen/minmax-scalar-x2.c b/src/qs8-vaddc/gen/minmax-scalar-x2.c
index 238b71d..fb76ce6 100644
--- a/src/qs8-vaddc/gen/minmax-scalar-x2.c
+++ b/src/qs8-vaddc/gen/minmax-scalar-x2.c
@@ -49,8 +49,8 @@
     vout0 += voutput_zero_point;
     vout1 += voutput_zero_point;
 
-    output[0] = vout0;
-    output[1] = vout1;
+    output[0] = (int8_t) vout0;
+    output[1] = (int8_t) vout1;
     output += 2;
   }
   if XNN_UNLIKELY(n != 0) {
diff --git a/src/qs8-vaddc/gen/minmax-scalar-x4.c b/src/qs8-vaddc/gen/minmax-scalar-x4.c
index e06c1db..fa8ce4a 100644
--- a/src/qs8-vaddc/gen/minmax-scalar-x4.c
+++ b/src/qs8-vaddc/gen/minmax-scalar-x4.c
@@ -61,10 +61,10 @@
     vout2 += voutput_zero_point;
     vout3 += voutput_zero_point;
 
-    output[0] = vout0;
-    output[1] = vout1;
-    output[2] = vout2;
-    output[3] = vout3;
+    output[0] = (int8_t) vout0;
+    output[1] = (int8_t) vout1;
+    output[2] = (int8_t) vout2;
+    output[3] = (int8_t) vout3;
     output += 4;
   }
   if XNN_UNLIKELY(n != 0) {
diff --git a/src/qs8-vaddc/gen/minmax-sse2-mul16-ld64-x16.c b/src/qs8-vaddc/gen/minmax-sse2-mul16-ld64-x16.c
index 927e3ca..5fdc40f 100644
--- a/src/qs8-vaddc/gen/minmax-sse2-mul16-ld64-x16.c
+++ b/src/qs8-vaddc/gen/minmax-sse2-mul16-ld64-x16.c
@@ -113,7 +113,7 @@
           output += 4;
         }
         if (n & (2 * sizeof(int8_t))) {
-          *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout0123456701234567, 0);
+          *((uint16_t*) output) = (uint16_t) _mm_cvtsi128_si32(vout0123456701234567);
           vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
           output += 2;
         }
diff --git a/src/qs8-vaddc/gen/minmax-sse2-mul16-ld64-x24.c b/src/qs8-vaddc/gen/minmax-sse2-mul16-ld64-x24.c
index 00d833c..7278c84 100644
--- a/src/qs8-vaddc/gen/minmax-sse2-mul16-ld64-x24.c
+++ b/src/qs8-vaddc/gen/minmax-sse2-mul16-ld64-x24.c
@@ -128,7 +128,7 @@
           output += 4;
         }
         if (n & (2 * sizeof(int8_t))) {
-          *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout0123456701234567, 0);
+          *((uint16_t*) output) = (uint16_t) _mm_cvtsi128_si32(vout0123456701234567);
           vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
           output += 2;
         }
diff --git a/src/qs8-vaddc/gen/minmax-sse2-mul16-ld64-x32.c b/src/qs8-vaddc/gen/minmax-sse2-mul16-ld64-x32.c
index 41dd72b..5de0c08 100644
--- a/src/qs8-vaddc/gen/minmax-sse2-mul16-ld64-x32.c
+++ b/src/qs8-vaddc/gen/minmax-sse2-mul16-ld64-x32.c
@@ -141,7 +141,7 @@
           output += 4;
         }
         if (n & (2 * sizeof(int8_t))) {
-          *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout0123456701234567, 0);
+          *((uint16_t*) output) = (uint16_t) _mm_cvtsi128_si32(vout0123456701234567);
           vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
           output += 2;
         }
diff --git a/src/qs8-vaddc/gen/minmax-sse2-mul16-ld64-x8.c b/src/qs8-vaddc/gen/minmax-sse2-mul16-ld64-x8.c
index 36b0671..9ca8391 100644
--- a/src/qs8-vaddc/gen/minmax-sse2-mul16-ld64-x8.c
+++ b/src/qs8-vaddc/gen/minmax-sse2-mul16-ld64-x8.c
@@ -94,12 +94,12 @@
         output += 4;
       }
       if (n & (2 * sizeof(int8_t))) {
-        *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout0123456701234567, 0);
+        *((uint16_t*) output) = (uint16_t) _mm_cvtsi128_si32(vout0123456701234567);
         vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
         output += 2;
       }
       if (n & (1 * sizeof(int8_t))) {
-        *output = (int32_t) _mm_cvtsi128_si32(vout0123456701234567);
+        *output = (int8_t) _mm_cvtsi128_si32(vout0123456701234567);
       }
     }
   }
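
The _mm_extract_epi16 -> _mm_cvtsi128_si32 swaps above are behavior-preserving: for lane 0, PEXTRW and MOVD read the same low bits of the register, and the value is truncated to 16 bits either way; MOVD is generally the cheaper instruction on SSE2 targets. A minimal sketch of the equivalence:

  #include <emmintrin.h>
  #include <stdint.h>

  /* Both helpers return the low 16 bits of the vector; only the
     instruction that moves them to a general-purpose register differs. */
  static uint16_t low16_via_pextrw(__m128i v) {
    return (uint16_t) _mm_extract_epi16(v, 0);  /* PEXTRW */
  }
  static uint16_t low16_via_movd(__m128i v) {
    return (uint16_t) _mm_cvtsi128_si32(v);     /* MOVD, then truncate */
  }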
diff --git a/src/qs8-vaddc/gen/minmax-wasmsimd-x16.c b/src/qs8-vaddc/gen/minmax-wasmsimd-x16.c
index d23ce97..d5cdc12 100644
--- a/src/qs8-vaddc/gen/minmax-wasmsimd-x16.c
+++ b/src/qs8-vaddc/gen/minmax-wasmsimd-x16.c
@@ -91,7 +91,7 @@
           output += 2;
         }
         if (n & (1 * sizeof(int8_t))) {
-          *output = wasm_i8x16_extract_lane(vout0123456701234567, 0);
+          *output = (int8_t) wasm_i8x16_extract_lane(vout0123456701234567, 0);
         }
         n = 0;
       }
diff --git a/src/qs8-vaddc/gen/minmax-wasmsimd-x24.c b/src/qs8-vaddc/gen/minmax-wasmsimd-x24.c
index 8bc4eb4..9d35e6f 100644
--- a/src/qs8-vaddc/gen/minmax-wasmsimd-x24.c
+++ b/src/qs8-vaddc/gen/minmax-wasmsimd-x24.c
@@ -101,7 +101,7 @@
           output += 2;
         }
         if (n & (1 * sizeof(int8_t))) {
-          *output = wasm_i8x16_extract_lane(vout0123456701234567, 0);
+          *output = (int8_t) wasm_i8x16_extract_lane(vout0123456701234567, 0);
         }
         n = 0;
       }
diff --git a/src/qs8-vaddc/gen/minmax-wasmsimd-x32.c b/src/qs8-vaddc/gen/minmax-wasmsimd-x32.c
index 2afd1f9..ed00d5e 100644
--- a/src/qs8-vaddc/gen/minmax-wasmsimd-x32.c
+++ b/src/qs8-vaddc/gen/minmax-wasmsimd-x32.c
@@ -107,7 +107,7 @@
           output += 2;
         }
         if (n & (1 * sizeof(int8_t))) {
-          *output = wasm_i8x16_extract_lane(vout0123456701234567, 0);
+          *output = (int8_t) wasm_i8x16_extract_lane(vout0123456701234567, 0);
         }
         n = 0;
       }
diff --git a/src/qs8-vaddc/gen/minmax-wasmsimd-x8.c b/src/qs8-vaddc/gen/minmax-wasmsimd-x8.c
index 16c8959..c2885fd 100644
--- a/src/qs8-vaddc/gen/minmax-wasmsimd-x8.c
+++ b/src/qs8-vaddc/gen/minmax-wasmsimd-x8.c
@@ -79,7 +79,7 @@
         output += 2;
       }
       if (n & (1 * sizeof(int8_t))) {
-        *output = wasm_i8x16_extract_lane(vout0123456701234567, 0);
+        *output = (int8_t) wasm_i8x16_extract_lane(vout0123456701234567, 0);
       }
     }
   }
diff --git a/src/qs8-vaddc/neon-ld64.c.in b/src/qs8-vaddc/neon-ld64.c.in
index b49ba75..023ede0 100644
--- a/src/qs8-vaddc/neon-ld64.c.in
+++ b/src/qs8-vaddc/neon-ld64.c.in
@@ -3,6 +3,7 @@
 // This source code is licensed under the BSD-style license found in the
 // LICENSE file in the root directory of this source tree.
 
+$assert DATATYPE in ["QS8", "QU8"]
 $assert BATCH_TILE % 8 == 0
 $assert BATCH_TILE >= 8
 $ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
@@ -12,38 +13,59 @@
 
 #include <xnnpack/vadd.h>
 
-#include <stdio.h>
 #include <inttypes.h>
 
 
-void xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x${BATCH_TILE}(
+$XINT8_T = {"QS8": "int8_t", "QU8": "uint8_t"}[DATATYPE]
+$XINT8X8_T = {"QS8": "int8x8_t", "QU8": "uint8x8_t"}[DATATYPE]
+$XINT8X16_T = {"QS8": "int8x16_t", "QU8": "uint8x16_t"}[DATATYPE]
+$VLD1_X8 = {"QS8": "vld1_s8", "QU8": "vld1_u8"}[DATATYPE]
+$VLD1_DUP_X8 = {"QS8": "vld1_dup_s8", "QU8": "vld1_dup_u8"}[DATATYPE]
+$VLD1Q_DUP_X8 = {"QS8": "vld1q_dup_s8", "QU8": "vld1q_dup_u8"}[DATATYPE]
+$VST1_LANE_X8 = {"QS8": "vst1_lane_s8", "QU8": "vst1_lane_u8"}[DATATYPE]
+$VST1_X8 = {"QS8": "vst1_s8", "QU8": "vst1_u8"}[DATATYPE]
+$VST1Q_X8 = {"QS8": "vst1q_s8", "QU8": "vst1q_u8"}[DATATYPE]
+$VMIN_X8 = {"QS8": "vmin_s8", "QU8": "vmin_u8"}[DATATYPE]
+$VMAX_X8 = {"QS8": "vmax_s8", "QU8": "vmax_u8"}[DATATYPE]
+$VMINQ_X8 = {"QS8": "vminq_s8", "QU8": "vminq_u8"}[DATATYPE]
+$VMAXQ_X8 = {"QS8": "vmaxq_s8", "QU8": "vmaxq_u8"}[DATATYPE]
+$VQMOVXN_S16 = {"QS8": "vqmovn_s16", "QU8": "vqmovun_s16"}[DATATYPE]
+$VEXT_X8 = {"QS8": "vext_s8", "QU8": "vext_u8"}[DATATYPE]
+$VGET_LOW_X8 = {"QS8": "vget_low_s8", "QU8": "vget_low_u8"}[DATATYPE]
+$VCOMBINE_X8 = {"QS8": "vcombine_s8", "QU8": "vcombine_u8"}[DATATYPE]
+$VREINTERPRET_U32_X8 = {"QS8": "vreinterpret_u32_s8", "QU8": "vreinterpret_u32_u8"}[DATATYPE]
+$VREINTERPRET_U16_X8 = {"QS8": "vreinterpret_u16_s8", "QU8": "vreinterpret_u16_u8"}[DATATYPE]
+void xnn_${DATATYPE.lower()}_vaddc_minmax_ukernel__neon_ld64_x${BATCH_TILE}(
     size_t n,
-    const int8_t* input_a,
-    const int8_t* input_b,
-    int8_t* output,
-    const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+    const ${XINT8_T}* input_a,
+    const ${XINT8_T}* input_b,
+    ${XINT8_T}* output,
+    const union xnn_${DATATYPE.lower()}_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
 {
-  const int8x8_t va_zero_point = vld1_dup_s8(&params->neon.a_zero_point);
+  const ${XINT8X8_T} va_zero_point = ${VLD1_DUP_X8}(&params->neon.a_zero_point);
   const int32x4_t va_multiplier = vld1q_dup_s32(&params->neon.a_multiplier);
   const int32x4_t vright_shift = vld1q_dup_s32(&params->neon.right_shift);
   const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->neon.output_zero_point);
-  const int8x16_t voutput_min = vld1q_dup_s8(&params->neon.output_min);
-  const int8x16_t voutput_max = vld1q_dup_s8(&params->neon.output_max);
+  const ${XINT8X16_T} voutput_min = ${VLD1Q_DUP_X8}(&params->neon.output_min);
+  const ${XINT8X16_T} voutput_max = ${VLD1Q_DUP_X8}(&params->neon.output_max);
 
   const int32_t vxb = (int32_t) *input_b - (int32_t) params->neon.b_zero_point;
   const int32_t vb = params->neon.b_multiplier;
-  const int32x4_t vb_bias = vdupq_n_s32(vxb * vb);
+  const int32x4_t vbias = vdupq_n_s32(vxb * vb);
 
-  for (; n >= ${BATCH_TILE} * sizeof(int8_t); n -= ${BATCH_TILE} * sizeof(int8_t)) {
+  for (; n >= ${BATCH_TILE} * sizeof(${XINT8_T}); n -= ${BATCH_TILE} * sizeof(${XINT8_T})) {
     $for N in range(0, BATCH_TILE, 8):
-      const int8x8_t va${ABC[N:N+8]} = vld1_s8(input_a); input_a += 8;
+      const ${XINT8X8_T} va${ABC[N:N+8]} = ${VLD1_X8}(input_a); input_a += 8;
 
     $for N in range(0, BATCH_TILE, 8):
-      const int16x8_t vxa${ABC[N:N+8]} = vsubl_s8(va${ABC[N:N+8]}, va_zero_point);
+      $if DATATYPE == "QU8":
+        const int16x8_t vxa${ABC[N:N+8]} = vreinterpretq_s16_u16(vsubl_u8(va${ABC[N:N+8]}, va_zero_point));
+      $else:
+        const int16x8_t vxa${ABC[N:N+8]} = vsubl_s8(va${ABC[N:N+8]}, va_zero_point);
 
     $for N in range(0, BATCH_TILE, 8):
-      int32x4_t vacc${ABC[N:N+4]} = vmlaq_s32(vb_bias, vmovl_s16(vget_low_s16(vxa${ABC[N:N+8]})), va_multiplier);
-      int32x4_t vacc${ABC[N+4:N+8]} = vmlaq_s32(vb_bias, vmovl_s16(vget_high_s16(vxa${ABC[N:N+8]})), va_multiplier);
+      int32x4_t vacc${ABC[N:N+4]} = vmlaq_s32(vbias, vmovl_s16(vget_low_s16(vxa${ABC[N:N+8]})), va_multiplier);
+      int32x4_t vacc${ABC[N+4:N+8]} = vmlaq_s32(vbias, vmovl_s16(vget_high_s16(vxa${ABC[N:N+8]})), va_multiplier);
 
     $for N in range(0, BATCH_TILE, 4):
       vacc${ABC[N:N+4]} = vrshlq_s32(vacc${ABC[N:N+4]}, vright_shift);
@@ -53,78 +75,81 @@
 
     $for N in range(0, BATCH_TILE, 16):
       $if N + 8 < BATCH_TILE:
-        int8x16_t vout${ABC[N:N+16]} = vcombine_s8(vqmovn_s16(vacc${ABC[N:N+8]}), vqmovn_s16(vacc${ABC[N+8:N+16]}));
+        ${XINT8X16_T} vout${ABC[N:N+16]} = ${VCOMBINE_X8}(${VQMOVXN_S16}(vacc${ABC[N:N+8]}), ${VQMOVXN_S16}(vacc${ABC[N+8:N+16]}));
       $else:
-        int8x8_t vout${ABC[N:N+8]} = vqmovn_s16(vacc${ABC[N:N+8]});
+        ${XINT8X8_T} vout${ABC[N:N+8]} = ${VQMOVXN_S16}(vacc${ABC[N:N+8]});
 
     $for N in range(0, BATCH_TILE, 16):
       $if N + 8 < BATCH_TILE:
-        vout${ABC[N:N+16]} = vmaxq_s8(vout${ABC[N:N+16]}, voutput_min);
+        vout${ABC[N:N+16]} = ${VMAXQ_X8}(vout${ABC[N:N+16]}, voutput_min);
       $else:
-        vout${ABC[N:N+8]} = vmax_s8(vout${ABC[N:N+8]}, vget_low_s8(voutput_min));
+        vout${ABC[N:N+8]} = ${VMAX_X8}(vout${ABC[N:N+8]}, ${VGET_LOW_X8}(voutput_min));
 
     $for N in range(0, BATCH_TILE, 16):
       $if N + 8 < BATCH_TILE:
-        vout${ABC[N:N+16]} = vminq_s8(vout${ABC[N:N+16]}, voutput_max);
+        vout${ABC[N:N+16]} = ${VMINQ_X8}(vout${ABC[N:N+16]}, voutput_max);
       $else:
-        vout${ABC[N:N+8]} = vmin_s8(vout${ABC[N:N+8]}, vget_low_s8(voutput_max));
+        vout${ABC[N:N+8]} = ${VMIN_X8}(vout${ABC[N:N+8]}, ${VGET_LOW_X8}(voutput_max));
 
     $for N in range(0, BATCH_TILE, 16):
       $if N + 8 < BATCH_TILE:
-        vst1q_s8(output, vout${ABC[N:N+16]}); output += 16;
+        ${VST1Q_X8}(output, vout${ABC[N:N+16]}); output += 16;
       $else:
-        vst1_s8(output, vout${ABC[N:N+8]}); output += 8;
+        ${VST1_X8}(output, vout${ABC[N:N+8]}); output += 8;
   }
   if XNN_UNLIKELY(n != 0) {
     ${"do " if BATCH_TILE > 8 else ""}{
       $if BATCH_TILE > 8:
-        const int8x8_t va${ABC[0:8]} = vld1_s8(input_a); input_a += 8;
+        const ${XINT8X8_T} va${ABC[0:8]} = ${VLD1_X8}(input_a); input_a += 8;
       $else:
-        const int8x8_t va${ABC[0:8]} = vld1_s8(input_a);
+        const ${XINT8X8_T} va${ABC[0:8]} = ${VLD1_X8}(input_a);
 
-      const int16x8_t vxa${ABC[0:8]} = vsubl_s8(va${ABC[0:8]}, va_zero_point);
+      $if DATATYPE == "QU8":
+        const int16x8_t vxa${ABC[0:8]} = vreinterpretq_s16_u16(vsubl_u8(va${ABC[0:8]}, va_zero_point));
+      $else:
+        const int16x8_t vxa${ABC[0:8]} = vsubl_s8(va${ABC[0:8]}, va_zero_point);
 
-      int32x4_t vacc${ABC[0:4]} = vmlaq_s32(vb_bias, vmovl_s16(vget_low_s16(vxa${ABC[0:8]})), va_multiplier);
-      int32x4_t vacc${ABC[4:8]} = vmlaq_s32(vb_bias, vmovl_s16(vget_high_s16(vxa${ABC[0:8]})), va_multiplier);
+      int32x4_t vacc${ABC[0:4]} = vmlaq_s32(vbias, vmovl_s16(vget_low_s16(vxa${ABC[0:8]})), va_multiplier);
+      int32x4_t vacc${ABC[4:8]} = vmlaq_s32(vbias, vmovl_s16(vget_high_s16(vxa${ABC[0:8]})), va_multiplier);
 
       vacc${ABC[0:4]} = vrshlq_s32(vacc${ABC[0:4]}, vright_shift);
       vacc${ABC[4:8]} = vrshlq_s32(vacc${ABC[4:8]}, vright_shift);
 
       const int16x8_t vacc${ABC[0:8]} = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc${ABC[0:4]}), vqmovn_s32(vacc${ABC[4:8]})), voutput_zero_point);
 
-      int8x8_t vout${ABC[0:8]} = vqmovn_s16(vacc${ABC[0:8]});
-      vout${ABC[0:8]} = vmax_s8(vout${ABC[0:8]}, vget_low_s8(voutput_min));
-      vout${ABC[0:8]} = vmin_s8(vout${ABC[0:8]}, vget_low_s8(voutput_max));
+      ${XINT8X8_T} vout${ABC[0:8]} = ${VQMOVXN_S16}(vacc${ABC[0:8]});
+      vout${ABC[0:8]} = ${VMAX_X8}(vout${ABC[0:8]}, ${VGET_LOW_X8}(voutput_min));
+      vout${ABC[0:8]} = ${VMIN_X8}(vout${ABC[0:8]}, ${VGET_LOW_X8}(voutput_max));
 
       $if BATCH_TILE > 8:
-        if XNN_LIKELY(n >= (8 * sizeof(int8_t))) {
-          vst1_s8(output, vout${ABC[0:8]}); output += 8;
-          n -= 8 * sizeof(int8_t);
+        if XNN_LIKELY(n >= (8 * sizeof(${XINT8_T}))) {
+          ${VST1_X8}(output, vout${ABC[0:8]}); output += 8;
+          n -= 8 * sizeof(${XINT8_T});
         } else {
-          if (n & (4 * sizeof(int8_t))) {
-            vst1_lane_u32(__builtin_assume_aligned(output, 1), vreinterpret_u32_s8(vout${ABC[0:8]}), 0); output += 4;
-            vout${ABC[0:8]} = vext_s8(vout${ABC[0:8]}, vout${ABC[0:8]}, 4);
+          if (n & (4 * sizeof(${XINT8_T}))) {
+            vst1_lane_u32(__builtin_assume_aligned(output, 1), ${VREINTERPRET_U32_X8}(vout${ABC[0:8]}), 0); output += 4;
+            vout${ABC[0:8]} = ${VEXT_X8}(vout${ABC[0:8]}, vout${ABC[0:8]}, 4);
           }
-          if (n & (2 * sizeof(int8_t))) {
-            vst1_lane_u16(__builtin_assume_aligned(output, 1), vreinterpret_u16_s8(vout${ABC[0:8]}), 0); output += 2;
-            vout${ABC[0:8]} = vext_s8(vout${ABC[0:8]}, vout${ABC[0:8]}, 2);
+          if (n & (2 * sizeof(${XINT8_T}))) {
+            vst1_lane_u16(__builtin_assume_aligned(output, 1), ${VREINTERPRET_U16_X8}(vout${ABC[0:8]}), 0); output += 2;
+            vout${ABC[0:8]} = ${VEXT_X8}(vout${ABC[0:8]}, vout${ABC[0:8]}, 2);
           }
-          if (n & (1 * sizeof(int8_t))) {
-            vst1_lane_s8(output, vout${ABC[0:8]}, 0);
+          if (n & (1 * sizeof(${XINT8_T}))) {
+            ${VST1_LANE_X8}(output, vout${ABC[0:8]}, 0);
           }
           n = 0;
         }
       $else:
-        if (n & (4 * sizeof(int8_t))) {
-          vst1_lane_u32(__builtin_assume_aligned(output, 1), vreinterpret_u32_s8(vout${ABC[0:8]}), 0); output += 4;
-          vout${ABC[0:8]} = vext_s8(vout${ABC[0:8]}, vout${ABC[0:8]}, 4);
+        if (n & (4 * sizeof(${XINT8_T}))) {
+          vst1_lane_u32(__builtin_assume_aligned(output, 1), ${VREINTERPRET_U32_X8}(vout${ABC[0:8]}), 0); output += 4;
+          vout${ABC[0:8]} = ${VEXT_X8}(vout${ABC[0:8]}, vout${ABC[0:8]}, 4);
         }
-        if (n & (2 * sizeof(int8_t))) {
-          vst1_lane_u16(__builtin_assume_aligned(output, 1), vreinterpret_u16_s8(vout${ABC[0:8]}), 0); output += 2;
-          vout${ABC[0:8]} = vext_s8(vout${ABC[0:8]}, vout${ABC[0:8]}, 2);
+        if (n & (2 * sizeof(${XINT8_T}))) {
+          vst1_lane_u16(__builtin_assume_aligned(output, 1), ${VREINTERPRET_U16_X8}(vout${ABC[0:8]}), 0); output += 2;
+          vout${ABC[0:8]} = ${VEXT_X8}(vout${ABC[0:8]}, vout${ABC[0:8]}, 2);
         }
-        if (n & (1 * sizeof(int8_t))) {
-          vst1_lane_s8(output, vout${ABC[0:8]}, 0);
+        if (n & (1 * sizeof(${XINT8_T}))) {
+          ${VST1_LANE_X8}(output, vout${ABC[0:8]}, 0);
         }
     }${" while (n != 0);" if BATCH_TILE > 8 else ""}
   }
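
In the QU8 branch above, vsubl_u8 produces a uint16x8_t that is immediately reinterpreted as int16x8_t. This is sound because each lane is a difference of two values in [0, 255]: computed modulo 2^16 and read back as signed, it equals the exact difference in [-255, 255]. A scalar model of that invariant (a sketch, not part of the change):

  #include <assert.h>
  #include <stdint.h>

  /* Scalar model of vreinterpretq_s16_u16(vsubl_u8(va, va_zero_point)):
     the wrapped 16-bit difference, read as signed, is the exact difference. */
  static int16_t widened_sub_u8(uint8_t a, uint8_t zero_point) {
    const uint16_t wrapped = (uint16_t) ((uint16_t) a - (uint16_t) zero_point);
    const int16_t signed_diff = (int16_t) wrapped;
    assert(signed_diff == (int16_t) ((int32_t) a - (int32_t) zero_point));
    return signed_diff;
  }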
diff --git a/src/qs8-vaddc/scalar.c.in b/src/qs8-vaddc/scalar.c.in
index 6e183aa..0b5b922 100644
--- a/src/qs8-vaddc/scalar.c.in
+++ b/src/qs8-vaddc/scalar.c.in
@@ -3,6 +3,7 @@
 // This source code is licensed under the BSD-style license found in the
 // LICENSE file in the root directory of this source tree.
 
+$assert DATATYPE in ["QS8", "QU8"]
 $assert BATCH_TILE >= 1
 #include <assert.h>
 
@@ -10,12 +11,13 @@
 #include <xnnpack/vadd.h>
 
 
-void xnn_qs8_vaddc_minmax_ukernel__scalar_x${BATCH_TILE}(
+$XINT8_T = {"QS8": "int8_t", "QU8": "uint8_t"}[DATATYPE]
+void xnn_${DATATYPE.lower()}_vaddc_minmax_ukernel__scalar_x${BATCH_TILE}(
     size_t n,
-    const int8_t* input_a,
-    const int8_t* input_b,
-    int8_t* output,
-    const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+    const ${XINT8_T}* input_a,
+    const ${XINT8_T}* input_b,
+    ${XINT8_T}* output,
+    const union xnn_${DATATYPE.lower()}_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
 {
   const int32_t vbias = params->scalar.bias + (int32_t) *input_b * params->scalar.b_multiplier;
   const int32_t va_multiplier = params->scalar.a_multiplier;
@@ -33,12 +35,12 @@
       int32_t vout = asr_s32(vacc + vrounding, vshift);
       vout = math_max_s32(vout, voutput_min_less_zero_point);
       vout = math_min_s32(vout, voutput_max_less_zero_point);
-      *output++ = (int8_t) (vout + voutput_zero_point);
+      *output++ = (${XINT8_T}) (vout + voutput_zero_point);
 
-      n -= sizeof(int8_t);
+      n -= sizeof(${XINT8_T});
     } while (n != 0);
   $else:
-    for (; n >= ${BATCH_TILE} * sizeof(int8_t); n -= ${BATCH_TILE} * sizeof(int8_t)) {
+    for (; n >= ${BATCH_TILE} * sizeof(${XINT8_T}); n -= ${BATCH_TILE} * sizeof(${XINT8_T})) {
       $for N in range(BATCH_TILE):
         const int32_t va${N} = input_a[${N}];
       input_a += ${BATCH_TILE};
@@ -60,7 +62,7 @@
         vout${N} += voutput_zero_point;
 
       $for N in range(BATCH_TILE):
-        output[${N}] = vout${N};
+        output[${N}] = (${XINT8_T}) vout${N};
       output += ${BATCH_TILE};
     }
     if XNN_UNLIKELY(n != 0) {
@@ -71,7 +73,7 @@
         int32_t vout = asr_s32(vacc + vrounding, vshift);
         vout = math_max_s32(vout, voutput_min_less_zero_point);
         vout = math_min_s32(vout, voutput_max_less_zero_point);
-        *output++ = (int8_t) (vout + voutput_zero_point);
+        *output++ = (${XINT8_T}) (vout + voutput_zero_point);
       $else:
         do {
           const int32_t va = *input_a++;
@@ -80,9 +82,9 @@
           int32_t vout = asr_s32(vacc + vrounding, vshift);
           vout = math_max_s32(vout, voutput_min_less_zero_point);
           vout = math_min_s32(vout, voutput_max_less_zero_point);
-          *output++ = (int8_t) (vout + voutput_zero_point);
+          *output++ = (${XINT8_T}) (vout + voutput_zero_point);
 
-          n -= sizeof(int8_t);
+          n -= sizeof(${XINT8_T});
         } while (n != 0);
     }
 }
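
For reference, the scalar template's per-element math is plain 32-bit fixed-point requantization: a bias that already folds in both zero-point terms, a rounded arithmetic shift, clamping against the zero-point-adjusted limits, and a final zero-point add. A standalone model under the same parameter naming, shown for the QU8 instantiation (a sketch; asr_s32 stands in for XNNPACK's arithmetic-shift helper):

  #include <stdint.h>

  /* asr_s32 models XNNPACK's arithmetic shift right; plain >> on a negative
     int32_t is implementation-defined in ISO C, so treat this as a model. */
  static int32_t asr_s32(int32_t x, uint32_t shift) { return x >> shift; }

  static uint8_t vaddc_element(
      uint8_t a, int32_t vbias, int32_t va_multiplier, int32_t vrounding,
      uint32_t vshift, int32_t vout_min_less_zp, int32_t vout_max_less_zp,
      int32_t vout_zp) {
    const int32_t vacc = vbias + (int32_t) a * va_multiplier;
    int32_t vout = asr_s32(vacc + vrounding, vshift);
    vout = vout < vout_min_less_zp ? vout_min_less_zp : vout;  /* math_max_s32 */
    vout = vout > vout_max_less_zp ? vout_max_less_zp : vout;  /* math_min_s32 */
    return (uint8_t) (vout + vout_zp);
  }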
diff --git a/src/qs8-vaddc/sse-mul16-ld64.c.in b/src/qs8-vaddc/sse-mul16-ld64.c.in
index be7a57e..cc5f4b1 100644
--- a/src/qs8-vaddc/sse-mul16-ld64.c.in
+++ b/src/qs8-vaddc/sse-mul16-ld64.c.in
@@ -3,6 +3,7 @@
 // This source code is licensed under the BSD-style license found in the
 // LICENSE file in the root directory of this source tree.
 
+$assert DATATYPE in ["QS8", "QU8"]
 $assert SSE in [2, 4]
 $assert not AVX or SSE == 4
 $SSE_HEADER = {2: "emmintrin.h", 4: "smmintrin.h"}[SSE]
@@ -17,13 +18,18 @@
 
 
 $PARAMS_STRUCT = "sse4_mul16" if SSE == 4 else "sse2"
+$XINT8_T = {"QS8": "int8_t", "QU8": "uint8_t"}[DATATYPE]
+$_MM_CVTEPI8_EPI16 = {"QS8": "_mm_cvtepi8_epi16", "QU8": "_mm_cvtepu8_epi16"}[DATATYPE]
+$_MM_PACKXS_EPI16 = {"QS8": "_mm_packs_epi16", "QU8": "_mm_packus_epi16"}[DATATYPE]
+$_MM_MIN_EPX8 = {"QS8": "_mm_min_epi8", "QU8": "_mm_min_epu8"}[DATATYPE]
+$_MM_MAX_EPX8 = {"QS8": "_mm_max_epi8", "QU8": "_mm_max_epu8"}[DATATYPE]
 $ISA = "avx" if AVX else {2: "sse2", 4: "sse41"}[SSE]
-void xnn_qs8_vaddc_minmax_ukernel__${ISA}_mul16_ld64_x${BATCH_TILE}(
+void xnn_${DATATYPE.lower()}_vaddc_minmax_ukernel__${ISA}_mul16_ld64_x${BATCH_TILE}(
     size_t n,
-    const int8_t* input_a,
-    const int8_t* input_b,
-    int8_t* output,
-    const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+    const ${XINT8_T}* input_a,
+    const ${XINT8_T}* input_b,
+    ${XINT8_T}* output,
+    const union xnn_${DATATYPE.lower()}_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
 {
   const __m128i vbias = _mm_add_epi32(
     _mm_shuffle_epi32(_mm_cvtsi32_si128(params->${PARAMS_STRUCT}.b_multiplier * (int32_t) *input_b), _MM_SHUFFLE(0, 0, 0, 0)),
@@ -36,11 +42,11 @@
   const __m128i voutput_min = _mm_load_si128((const __m128i*) params->${PARAMS_STRUCT}.output_min);
   const __m128i voutput_max = _mm_load_si128((const __m128i*) params->${PARAMS_STRUCT}.output_max);
 
-  for (; n >= ${BATCH_TILE} * sizeof(int8_t); n -= ${BATCH_TILE} * sizeof(int8_t)) {
+  for (; n >= ${BATCH_TILE} * sizeof(${XINT8_T}); n -= ${BATCH_TILE} * sizeof(${XINT8_T})) {
     $if SSE == 4:
-      const __m128i va${ABC[0:8]} = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
+      const __m128i va${ABC[0:8]} = ${_MM_CVTEPI8_EPI16}(_mm_loadl_epi64((const __m128i*) input_a));
       $for N in range(8, BATCH_TILE, 8):
-        const __m128i va${ABC[N:N+8]} = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input_a + ${N})));
+        const __m128i va${ABC[N:N+8]} = ${_MM_CVTEPI8_EPI16}(_mm_loadl_epi64((const __m128i*) (input_a + ${N})));
     $else:
       __m128i va${ABC[0:8]} = _mm_loadl_epi64((const __m128i*) input_a);
       $for N in range(8, BATCH_TILE, 8):
@@ -48,8 +54,13 @@
     input_a += ${BATCH_TILE};
 
     $if SSE < 4:
-      $for N in range(0, BATCH_TILE, 8):
-        va${ABC[N:N+8]} = _mm_srai_epi16(_mm_unpacklo_epi8(va${ABC[N:N+8]}, va${ABC[N:N+8]}), 8);
+      $if DATATYPE == "QU8":
+        const __m128i vzero = _mm_setzero_si128();
+        $for N in range(0, BATCH_TILE, 8):
+          va${ABC[N:N+8]} = _mm_unpacklo_epi8(va${ABC[N:N+8]}, vzero);
+      $else:
+        $for N in range(0, BATCH_TILE, 8):
+          va${ABC[N:N+8]} = _mm_srai_epi16(_mm_unpacklo_epi8(va${ABC[N:N+8]}, va${ABC[N:N+8]}), 8);
 
     $for N in range(0, BATCH_TILE, 8):
       __m128i vaprod${ABC[N:N+8]}hi = _mm_mulhi_epu16(va${ABC[N:N+8]}, va_multiplier_lo);
@@ -58,8 +69,9 @@
     $for N in range(0, BATCH_TILE, 8):
       vaprod${ABC[N:N+8]}hi = _mm_add_epi16(vaprod${ABC[N:N+8]}hi, _mm_mullo_epi16(va${ABC[N:N+8]}, va_multiplier_hi));
 
-    $for N in range(0, BATCH_TILE, 8):
-      vaprod${ABC[N:N+8]}hi = _mm_sub_epi16(vaprod${ABC[N:N+8]}hi, _mm_and_si128(_mm_srai_epi16(va${ABC[N:N+8]}, 15), va_multiplier_lo));
+    $if DATATYPE == "QS8":
+      $for N in range(0, BATCH_TILE, 8):
+        vaprod${ABC[N:N+8]}hi = _mm_sub_epi16(vaprod${ABC[N:N+8]}hi, _mm_and_si128(_mm_srai_epi16(va${ABC[N:N+8]}, 15), va_multiplier_lo));
 
     $for N in range(0, BATCH_TILE, 8):
       __m128i vacc${ABC[N:N+4]} = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod${ABC[N:N+8]}lo, vaprod${ABC[N:N+8]}hi));
@@ -71,7 +83,7 @@
     $for N in range(0, BATCH_TILE, 8):
       __m128i vout${ABC[N:N+8]} = _mm_adds_epi16(_mm_packs_epi32(vacc${ABC[N:N+4]}, vacc${ABC[N+4:N+8]}), voutput_zero_point);
 
-    $if SSE < 4:
+    $if DATATYPE == "QS8" and SSE < 4:
       $for N in range(0, BATCH_TILE, 8):
         vout${ABC[N:N+8]} = _mm_max_epi16(vout${ABC[N:N+8]}, voutput_min);
 
@@ -80,22 +92,22 @@
 
     $for N in range(0, BATCH_TILE, 16):
       $if N + 8 < BATCH_TILE:
-        __m128i vout${ABC[N:N+16]} = _mm_packs_epi16(vout${ABC[N:N+8]}, vout${ABC[N+8:N+16]});
+        __m128i vout${ABC[N:N+16]} = ${_MM_PACKXS_EPI16}(vout${ABC[N:N+8]}, vout${ABC[N+8:N+16]});
       $else:
-        __m128i vout${ABC[N:N+8]}${ABC[N:N+8]} = _mm_packs_epi16(vout${ABC[N:N+8]}, vout${ABC[N:N+8]});
+        __m128i vout${ABC[N:N+8]}${ABC[N:N+8]} = ${_MM_PACKXS_EPI16}(vout${ABC[N:N+8]}, vout${ABC[N:N+8]});
 
-    $if SSE == 4:
+    $if DATATYPE == "QU8" or SSE == 4:
       $for N in range(0, BATCH_TILE, 16):
         $if N + 8 < BATCH_TILE:
-          vout${ABC[N:N+16]} = _mm_max_epi8(vout${ABC[N:N+16]}, voutput_min);
+          vout${ABC[N:N+16]} = ${_MM_MAX_EPX8}(vout${ABC[N:N+16]}, voutput_min);
         $else:
-          vout${ABC[N:N+8]}${ABC[N:N+8]} = _mm_max_epi8(vout${ABC[N:N+8]}${ABC[N:N+8]}, voutput_min);
+          vout${ABC[N:N+8]}${ABC[N:N+8]} = ${_MM_MAX_EPX8}(vout${ABC[N:N+8]}${ABC[N:N+8]}, voutput_min);
 
       $for N in range(0, BATCH_TILE, 16):
         $if N + 8 < BATCH_TILE:
-          vout${ABC[N:N+16]} = _mm_min_epi8(vout${ABC[N:N+16]}, voutput_max);
+          vout${ABC[N:N+16]} = ${_MM_MIN_EPX8}(vout${ABC[N:N+16]}, voutput_max);
         $else:
-          vout${ABC[N:N+8]}${ABC[N:N+8]} = _mm_min_epi8(vout${ABC[N:N+8]}${ABC[N:N+8]}, voutput_max);
+          vout${ABC[N:N+8]}${ABC[N:N+8]} = ${_MM_MIN_EPX8}(vout${ABC[N:N+8]}${ABC[N:N+8]}, voutput_max);
 
     $if BATCH_TILE >= 16:
       _mm_storeu_si128((__m128i*) output, vout${ABC[0:16]});
@@ -111,21 +123,25 @@
   if XNN_UNLIKELY(n != 0) {
     ${"do " if BATCH_TILE > 8 else ""}{
       $if SSE == 4:
-        const __m128i va${ABC[0:8]} = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
+        const __m128i va${ABC[0:8]} = ${_MM_CVTEPI8_EPI16}(_mm_loadl_epi64((const __m128i*) input_a));
       $else:
         __m128i va${ABC[0:8]} = _mm_loadl_epi64((const __m128i*) input_a);
       $if BATCH_TILE > 8:
         input_a += 8;
 
       $if SSE < 4:
-        va${ABC[0:8]} = _mm_srai_epi16(_mm_unpacklo_epi8(va${ABC[0:8]}, va${ABC[0:8]}), 8);
+        $if DATATYPE == "QU8":
+          va${ABC[0:8]} = _mm_unpacklo_epi8(va${ABC[0:8]}, _mm_setzero_si128());
+        $else:
+          va${ABC[0:8]} = _mm_srai_epi16(_mm_unpacklo_epi8(va${ABC[0:8]}, va${ABC[0:8]}), 8);
 
       __m128i vaprod${ABC[0:8]}hi = _mm_mulhi_epu16(va${ABC[0:8]}, va_multiplier_lo);
       const __m128i vaprod${ABC[0:8]}lo = _mm_mullo_epi16(va${ABC[0:8]}, va_multiplier_lo);
 
       vaprod${ABC[0:8]}hi = _mm_add_epi16(vaprod${ABC[0:8]}hi, _mm_mullo_epi16(va${ABC[0:8]}, va_multiplier_hi));
 
-      vaprod${ABC[0:8]}hi = _mm_sub_epi16(vaprod${ABC[0:8]}hi, _mm_and_si128(_mm_srai_epi16(va${ABC[0:8]}, 15), va_multiplier_lo));
+      $if DATATYPE == "QS8":
+        vaprod${ABC[0:8]}hi = _mm_sub_epi16(vaprod${ABC[0:8]}hi, _mm_and_si128(_mm_srai_epi16(va${ABC[0:8]}, 15), va_multiplier_lo));
 
       __m128i vacc${ABC[0:4]} = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod${ABC[0:8]}lo, vaprod${ABC[0:8]}hi));
       __m128i vacc${ABC[4:8]} = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod${ABC[0:8]}lo, vaprod${ABC[0:8]}hi));
@@ -134,55 +150,61 @@
       vacc${ABC[4:8]} = _mm_sra_epi32(_mm_add_epi32(vacc${ABC[4:8]}, vrounding), vshift);
 
       __m128i vout${ABC[0:8]} = _mm_adds_epi16(_mm_packs_epi32(vacc${ABC[0:4]}, vacc${ABC[4:8]}), voutput_zero_point);
-      $if SSE < 4:
+      $if DATATYPE == "QS8" and SSE < 4:
         vout${ABC[0:8]} = _mm_max_epi16(vout${ABC[0:8]}, voutput_min);
         vout${ABC[0:8]} = _mm_min_epi16(vout${ABC[0:8]}, voutput_max);
 
-      __m128i vout${ABC[0:8]}${ABC[0:8]} = _mm_packs_epi16(vout${ABC[0:8]}, vout${ABC[0:8]});
-      $if SSE == 4:
-        vout${ABC[0:8]}${ABC[0:8]} = _mm_max_epi8(vout${ABC[0:8]}${ABC[0:8]}, voutput_min);
-        vout${ABC[0:8]}${ABC[0:8]} = _mm_min_epi8(vout${ABC[0:8]}${ABC[0:8]}, voutput_max);
+      __m128i vout${ABC[0:8]}${ABC[0:8]} = ${_MM_PACKXS_EPI16}(vout${ABC[0:8]}, vout${ABC[0:8]});
+      $if DATATYPE == "QU8" or SSE == 4:
+        vout${ABC[0:8]}${ABC[0:8]} = ${_MM_MAX_EPX8}(vout${ABC[0:8]}${ABC[0:8]}, voutput_min);
+        vout${ABC[0:8]}${ABC[0:8]} = ${_MM_MIN_EPX8}(vout${ABC[0:8]}${ABC[0:8]}, voutput_max);
 
       $if BATCH_TILE > 8:
-        if XNN_LIKELY(n >= (8 * sizeof(int8_t))) {
+        if XNN_LIKELY(n >= (8 * sizeof(${XINT8_T}))) {
           _mm_storel_epi64((__m128i*) output, vout${ABC[0:8]}${ABC[0:8]});
           output += 8;
-          n -= 8 * sizeof(int8_t);
+          n -= 8 * sizeof(${XINT8_T});
         } else {
-          if (n & (4 * sizeof(int8_t))) {
+          if (n & (4 * sizeof(${XINT8_T}))) {
             *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout${ABC[0:8]}${ABC[0:8]});
             vout${ABC[0:8]}${ABC[0:8]} = _mm_srli_epi64(vout${ABC[0:8]}${ABC[0:8]}, 32);
             output += 4;
           }
-          if (n & (2 * sizeof(int8_t))) {
-            *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout${ABC[0:8]}${ABC[0:8]}, 0);
+          if (n & (2 * sizeof(${XINT8_T}))) {
+            $if SSE == 4:
+              *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout${ABC[0:8]}${ABC[0:8]}, 0);
+            $else:
+              *((uint16_t*) output) = (uint16_t) _mm_cvtsi128_si32(vout${ABC[0:8]}${ABC[0:8]});
             vout${ABC[0:8]}${ABC[0:8]} = _mm_srli_epi32(vout${ABC[0:8]}${ABC[0:8]}, 16);
             output += 2;
           }
-          if (n & (1 * sizeof(int8_t))) {
+          if (n & (1 * sizeof(${XINT8_T}))) {
             $if SSE == 4:
-              *output = (int8_t) _mm_extract_epi8(vout${ABC[0:8]}${ABC[0:8]}, 0);
+              *output = (${XINT8_T}) _mm_extract_epi8(vout${ABC[0:8]}${ABC[0:8]}, 0);
             $else:
-              *output = (int32_t) _mm_cvtsi128_si32(vout${ABC[0:8]}${ABC[0:8]});
+              *output = (${XINT8_T}) _mm_cvtsi128_si32(vout${ABC[0:8]}${ABC[0:8]});
           }
           n = 0;
         }
       $else:
-        if (n & (4 * sizeof(int8_t))) {
+        if (n & (4 * sizeof(${XINT8_T}))) {
           *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout${ABC[0:8]}${ABC[0:8]});
           vout${ABC[0:8]}${ABC[0:8]} = _mm_srli_epi64(vout${ABC[0:8]}${ABC[0:8]}, 32);
           output += 4;
         }
-        if (n & (2 * sizeof(int8_t))) {
-          *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout${ABC[0:8]}${ABC[0:8]}, 0);
+        if (n & (2 * sizeof(${XINT8_T}))) {
+          $if SSE == 4:
+            *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout${ABC[0:8]}${ABC[0:8]}, 0);
+          $else:
+            *((uint16_t*) output) = (uint16_t) _mm_cvtsi128_si32(vout${ABC[0:8]}${ABC[0:8]});
           vout${ABC[0:8]}${ABC[0:8]} = _mm_srli_epi32(vout${ABC[0:8]}${ABC[0:8]}, 16);
           output += 2;
         }
-        if (n & (1 * sizeof(int8_t))) {
+        if (n & (1 * sizeof(${XINT8_T}))) {
           $if SSE == 4:
-            *output = (int8_t) _mm_extract_epi8(vout${ABC[0:8]}${ABC[0:8]}, 0);
+            *output = (${XINT8_T}) _mm_extract_epi8(vout${ABC[0:8]}${ABC[0:8]}, 0);
           $else:
-            *output = (int32_t) _mm_cvtsi128_si32(vout${ABC[0:8]}${ABC[0:8]});
+            *output = (${XINT8_T}) _mm_cvtsi128_si32(vout${ABC[0:8]}${ABC[0:8]});
         }
     }${" while (n != 0);" if BATCH_TILE > 8 else ""}
   }
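
The MUL16 path works around the lack of a 16x16->32 signed multiply on SSE2 by splitting the sub-2^21 multiplier into 16-bit halves: _mm_mullo_epi16/_mm_mulhi_epu16 against the low half plus _mm_mullo_epi16 against the high half. Because _mm_mulhi_epu16 treats lanes as unsigned, negative QS8 lanes need the _mm_sub_epi16 fixup kept under $if DATATYPE == "QS8" above; QU8 lanes are zero-extended and non-negative, so the fixup is dropped. A scalar model of one lane (a sketch):

  #include <stdint.h>

  /* Scalar model of one MUL16 lane: the 32-bit product of a 16-bit input
     and a multiplier below 2^21, assembled from 16x16-bit multiplies. */
  static int32_t mul16_lane(int16_t x, uint32_t multiplier, int is_signed) {
    const uint16_t xu  = (uint16_t) x;
    const uint16_t mlo = (uint16_t) multiplier;
    const uint16_t mhi = (uint16_t) (multiplier >> 16);
    const uint16_t lo  = (uint16_t) ((uint32_t) xu * mlo);    /* _mm_mullo_epi16 */
    uint16_t hi = (uint16_t) (((uint32_t) xu * mlo) >> 16);   /* _mm_mulhi_epu16 */
    hi = (uint16_t) (hi + (uint16_t) ((uint32_t) xu * mhi));  /* + mullo(x, mhi) */
    if (is_signed && x < 0) {
      hi = (uint16_t) (hi - mlo);  /* QS8-only fixup: mulhi_epu16 saw x + 2^16 */
    }
    /* Two's-complement reinterpretation of the packed hi:lo halves,
       as produced by _mm_unpacklo_epi16. */
    return (int32_t) (((uint32_t) hi << 16) | lo);
  }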
diff --git a/src/qs8-vaddc/sse-mul32-ld32.c.in b/src/qs8-vaddc/sse-mul32-ld32.c.in
index bf1dadb..c9e578d 100644
--- a/src/qs8-vaddc/sse-mul32-ld32.c.in
+++ b/src/qs8-vaddc/sse-mul32-ld32.c.in
@@ -3,6 +3,7 @@
 // This source code is licensed under the BSD-style license found in the
 // LICENSE file in the root directory of this source tree.
 
+$assert DATATYPE in ["QS8", "QU8"]
 $assert SSE == 4
 $assert not XOP or AVX
 $assert BATCH_TILE % 8 == 0
diff --git a/src/qs8-vaddc/wasmsimd.c.in b/src/qs8-vaddc/wasmsimd.c.in
index e5d65d4..a87d5a8 100644
--- a/src/qs8-vaddc/wasmsimd.c.in
+++ b/src/qs8-vaddc/wasmsimd.c.in
@@ -3,6 +3,7 @@
 // This source code is licensed under the BSD-style license found in the
 // LICENSE file in the root directory of this source tree.
 
+$assert DATATYPE in ["QS8", "QU8"]
 $assert BATCH_TILE % 8 == 0
 $assert BATCH_TILE >= 8
 $ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
@@ -13,12 +14,19 @@
 #include <xnnpack/vadd.h>
 
 
-void xnn_qs8_vaddc_minmax_ukernel__wasmsimd_x${BATCH_TILE}(
+$XINT8_T = {"QS8": "int8_t", "QU8": "uint8_t"}[DATATYPE]
+$WASM_X16X8_LOAD8X8 = {"QS8": "wasm_i16x8_load8x8", "QU8": "wasm_u16x8_load8x8"}[DATATYPE]
+$WASM_X32X4_EXTEND_LOW_X16X8 = {"QS8": "wasm_i32x4_extend_low_i16x8", "QU8": "wasm_u32x4_extend_low_u16x8"}[DATATYPE]
+$WASM_X32X4_EXTEND_HIGH_X16X8 = {"QS8": "wasm_i32x4_extend_high_i16x8", "QU8": "wasm_u32x4_extend_high_u16x8"}[DATATYPE]
+$WASM_X8X16_NARROW_I16X8 = {"QS8": "wasm_i8x16_narrow_i16x8", "QU8": "wasm_u8x16_narrow_i16x8"}[DATATYPE]
+$WASM_X8X16_MIN = {"QS8": "wasm_i8x16_min", "QU8": "wasm_u8x16_min"}[DATATYPE]
+$WASM_X8X16_MAX = {"QS8": "wasm_i8x16_max", "QU8": "wasm_u8x16_max"}[DATATYPE]
+void xnn_${DATATYPE.lower()}_vaddc_minmax_ukernel__wasmsimd_x${BATCH_TILE}(
     size_t n,
-    const int8_t* input_a,
-    const int8_t* input_b,
-    int8_t* output,
-    const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+    const ${XINT8_T}* input_a,
+    const ${XINT8_T}* input_b,
+    ${XINT8_T}* output,
+    const union xnn_${DATATYPE.lower()}_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
 {
   const v128_t va_multiplier = wasm_v128_load(params->wasmsimd.a_multiplier);
   const v128_t vrounding = wasm_v128_load(params->wasmsimd.rounding);
@@ -30,15 +38,15 @@
   v128_t vbias = wasm_i32x4_splat((int32_t) *input_b * params->wasmsimd.b_multiplier[0]);
   vbias = wasm_i32x4_add(vbias, wasm_v128_load(params->wasmsimd.bias));
 
-  for (; n >= ${BATCH_TILE} * sizeof(int8_t); n -= ${BATCH_TILE} * sizeof(int8_t)) {
-    const v128_t va${ABC[0:8]} = wasm_i16x8_load8x8(input_a);
+  for (; n >= ${BATCH_TILE} * sizeof(${XINT8_T}); n -= ${BATCH_TILE} * sizeof(${XINT8_T})) {
+    const v128_t va${ABC[0:8]} = ${WASM_X16X8_LOAD8X8}(input_a);
     $for N in range(8, BATCH_TILE, 8):
-      const v128_t va${ABC[N:N+8]} = wasm_i16x8_load8x8(input_a + ${N});
+      const v128_t va${ABC[N:N+8]} = ${WASM_X16X8_LOAD8X8}(input_a + ${N});
     input_a += ${BATCH_TILE};
 
     $for N in range(0, BATCH_TILE, 8):
-      v128_t vacc${ABC[N:N+4]} = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(va${ABC[N:N+8]}), va_multiplier));
-      v128_t vacc${ABC[N+4:N+8]} = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(va${ABC[N:N+8]}), va_multiplier));
+      v128_t vacc${ABC[N:N+4]} = wasm_i32x4_add(vbias, wasm_i32x4_mul(${WASM_X32X4_EXTEND_LOW_X16X8}(va${ABC[N:N+8]}), va_multiplier));
+      v128_t vacc${ABC[N+4:N+8]} = wasm_i32x4_add(vbias, wasm_i32x4_mul(${WASM_X32X4_EXTEND_HIGH_X16X8}(va${ABC[N:N+8]}), va_multiplier));
 
     $for N in range(0, BATCH_TILE, 4):
       vacc${ABC[N:N+4]} = wasm_i32x4_shr(wasm_i32x4_add(vacc${ABC[N:N+4]}, vrounding), vshift);
@@ -48,21 +56,21 @@
 
     $for N in range(0, BATCH_TILE, 16):
       $if N + 8 < BATCH_TILE:
-        v128_t vout${ABC[N:N+16]} = wasm_i8x16_narrow_i16x8(vout${ABC[N:N+8]}, vout${ABC[N+8:N+16]});
+        v128_t vout${ABC[N:N+16]} = ${WASM_X8X16_NARROW_I16X8}(vout${ABC[N:N+8]}, vout${ABC[N+8:N+16]});
       $else:
-        v128_t vout${ABC[N:N+8]}${ABC[N:N+8]} = wasm_i8x16_narrow_i16x8(vout${ABC[N:N+8]}, vout${ABC[N:N+8]});
+        v128_t vout${ABC[N:N+8]}${ABC[N:N+8]} = ${WASM_X8X16_NARROW_I16X8}(vout${ABC[N:N+8]}, vout${ABC[N:N+8]});
 
     $for N in range(0, BATCH_TILE, 16):
       $if N + 8 < BATCH_TILE:
-        vout${ABC[N:N+16]} = wasm_i8x16_max(vout${ABC[N:N+16]}, voutput_min);
+        vout${ABC[N:N+16]} = ${WASM_X8X16_MAX}(vout${ABC[N:N+16]}, voutput_min);
       $else:
-        vout${ABC[N:N+8]}${ABC[N:N+8]} = wasm_i8x16_max(vout${ABC[N:N+8]}${ABC[N:N+8]}, voutput_min);
+        vout${ABC[N:N+8]}${ABC[N:N+8]} = ${WASM_X8X16_MAX}(vout${ABC[N:N+8]}${ABC[N:N+8]}, voutput_min);
 
     $for N in range(0, BATCH_TILE, 16):
       $if N + 8 < BATCH_TILE:
-        vout${ABC[N:N+16]} = wasm_i8x16_min(vout${ABC[N:N+16]}, voutput_max);
+        vout${ABC[N:N+16]} = ${WASM_X8X16_MIN}(vout${ABC[N:N+16]}, voutput_max);
       $else:
-        vout${ABC[N:N+8]}${ABC[N:N+8]} = wasm_i8x16_min(vout${ABC[N:N+8]}${ABC[N:N+8]}, voutput_max);
+        vout${ABC[N:N+8]}${ABC[N:N+8]} = ${WASM_X8X16_MIN}(vout${ABC[N:N+8]}${ABC[N:N+8]}, voutput_max);
 
     $if BATCH_TILE >= 16:
       wasm_v128_store(output, vout${ABC[0:16]});
@@ -77,56 +85,56 @@
   }
   if XNN_UNLIKELY(n != 0) {
     ${"do " if BATCH_TILE > 8 else ""}{
-      const v128_t va${ABC[0:8]} = wasm_i16x8_load8x8(input_a);
+      const v128_t va${ABC[0:8]} = ${WASM_X16X8_LOAD8X8}(input_a);
       $if BATCH_TILE > 8:
         input_a += 8;
 
-      v128_t vacc${ABC[0:4]} = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(va${ABC[0:8]}), va_multiplier));
-      v128_t vacc${ABC[4:8]} = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(va${ABC[0:8]}), va_multiplier));
+      v128_t vacc${ABC[0:4]} = wasm_i32x4_add(vbias, wasm_i32x4_mul(${WASM_X32X4_EXTEND_LOW_X16X8}(va${ABC[0:8]}), va_multiplier));
+      v128_t vacc${ABC[4:8]} = wasm_i32x4_add(vbias, wasm_i32x4_mul(${WASM_X32X4_EXTEND_HIGH_X16X8}(va${ABC[0:8]}), va_multiplier));
 
       vacc${ABC[0:4]} = wasm_i32x4_shr(wasm_i32x4_add(vacc${ABC[0:4]}, vrounding), vshift);
       vacc${ABC[4:8]} = wasm_i32x4_shr(wasm_i32x4_add(vacc${ABC[4:8]}, vrounding), vshift);
 
       v128_t vout${ABC[0:8]} = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vacc${ABC[0:4]}, vacc${ABC[4:8]}), voutput_zero_point);
 
-      v128_t vout${ABC[0:8]}${ABC[0:8]} = wasm_i8x16_narrow_i16x8(vout${ABC[0:8]}, vout${ABC[0:8]});
-      vout${ABC[0:8]}${ABC[0:8]} = wasm_i8x16_max(vout${ABC[0:8]}${ABC[0:8]}, voutput_min);
-      vout${ABC[0:8]}${ABC[0:8]} = wasm_i8x16_min(vout${ABC[0:8]}${ABC[0:8]}, voutput_max);
+      v128_t vout${ABC[0:8]}${ABC[0:8]} = ${WASM_X8X16_NARROW_I16X8}(vout${ABC[0:8]}, vout${ABC[0:8]});
+      vout${ABC[0:8]}${ABC[0:8]} = ${WASM_X8X16_MAX}(vout${ABC[0:8]}${ABC[0:8]}, voutput_min);
+      vout${ABC[0:8]}${ABC[0:8]} = ${WASM_X8X16_MIN}(vout${ABC[0:8]}${ABC[0:8]}, voutput_max);
 
       $if BATCH_TILE > 8:
-        if XNN_LIKELY(n >= (8 * sizeof(int8_t))) {
+        if XNN_LIKELY(n >= (8 * sizeof(${XINT8_T}))) {
           *((double*) output) = wasm_f64x2_extract_lane(vout${ABC[0:8]}${ABC[0:8]}, 0);
           output += 8;
-          n -= 8 * sizeof(int8_t);
+          n -= 8 * sizeof(${XINT8_T});
         } else {
-          if (n & (4 * sizeof(int8_t))) {
+          if (n & (4 * sizeof(${XINT8_T}))) {
             *((uint32_t*) output) = (uint32_t) wasm_i32x4_extract_lane(vout${ABC[0:8]}${ABC[0:8]}, 0);
             vout${ABC[0:8]}${ABC[0:8]} = wasm_u64x2_shr(vout${ABC[0:8]}${ABC[0:8]}, 32);
             output += 4;
           }
-          if (n & (2 * sizeof(int8_t))) {
+          if (n & (2 * sizeof(${XINT8_T}))) {
             *((uint16_t*) output) = (uint16_t) wasm_i16x8_extract_lane(vout${ABC[0:8]}${ABC[0:8]}, 0);
             vout${ABC[0:8]}${ABC[0:8]} = wasm_u32x4_shr(vout${ABC[0:8]}${ABC[0:8]}, 16);
             output += 2;
           }
-          if (n & (1 * sizeof(int8_t))) {
-            *output = wasm_i8x16_extract_lane(vout${ABC[0:8]}${ABC[0:8]}, 0);
+          if (n & (1 * sizeof(${XINT8_T}))) {
+            *output = (${XINT8_T}) wasm_i8x16_extract_lane(vout${ABC[0:8]}${ABC[0:8]}, 0);
           }
           n = 0;
         }
       $else:
-        if (n & (4 * sizeof(int8_t))) {
+        if (n & (4 * sizeof(${XINT8_T}))) {
           *((uint32_t*) output) = (uint32_t) wasm_i32x4_extract_lane(vout${ABC[0:8]}${ABC[0:8]}, 0);
           vout${ABC[0:8]}${ABC[0:8]} = wasm_u64x2_shr(vout${ABC[0:8]}${ABC[0:8]}, 32);
           output += 4;
         }
-        if (n & (2 * sizeof(int8_t))) {
+        if (n & (2 * sizeof(${XINT8_T}))) {
           *((uint16_t*) output) = (uint16_t) wasm_i16x8_extract_lane(vout${ABC[0:8]}${ABC[0:8]}, 0);
           vout${ABC[0:8]}${ABC[0:8]} = wasm_u32x4_shr(vout${ABC[0:8]}${ABC[0:8]}, 16);
           output += 2;
         }
-        if (n & (1 * sizeof(int8_t))) {
-          *output = wasm_i8x16_extract_lane(vout${ABC[0:8]}${ABC[0:8]}, 0);
+        if (n & (1 * sizeof(${XINT8_T}))) {
+          *output = (${XINT8_T}) wasm_i8x16_extract_lane(vout${ABC[0:8]}${ABC[0:8]}, 0);
         }
     }${" while (n != 0);" if BATCH_TILE > 8 else ""}
   }
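
With both zero points folded into vbias, the QU8 instantiation of this template only changes the widening and narrowing ends of the pipeline: wasm_u16x8_load8x8 plus the unsigned 32-bit extends on input (the 32-bit multiply itself is sign-agnostic), and unsigned narrow/min/max on output. A minimal sketch of what the QU8 output stage expands to:

  #include <wasm_simd128.h>

  /* QU8 output stage: saturating narrow of the zero-point-adjusted 16-bit
     sums to unsigned bytes, then clamping to the output range. */
  static v128_t qu8_output_stage(v128_t vacc16, v128_t voutput_min, v128_t voutput_max) {
    v128_t vout = wasm_u8x16_narrow_i16x8(vacc16, vacc16);
    vout = wasm_u8x16_max(vout, voutput_min);
    return wasm_u8x16_min(vout, voutput_max);
  }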
diff --git a/src/qu8-vadd/gen/minmax-neon-ld64-x16.c b/src/qu8-vadd/gen/minmax-neon-ld64-x16.c
new file mode 100644
index 0000000..a6c8756
--- /dev/null
+++ b/src/qu8-vadd/gen/minmax-neon-ld64-x16.c
@@ -0,0 +1,112 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-vadd/neon-ld64.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/vadd.h>
+
+
+void xnn_qu8_vadd_minmax_ukernel__neon_ld64_x16(
+    size_t n,
+    const uint8_t* input_a,
+    const uint8_t* input_b,
+    uint8_t* output,
+    const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  const uint8x8_t va_zero_point = vld1_dup_u8(&params->neon.a_zero_point);
+  const uint8x8_t vb_zero_point = vld1_dup_u8(&params->neon.b_zero_point);
+  const int32x4_t va_multiplier = vld1q_dup_s32(&params->neon.a_multiplier);
+  const int32x4_t vb_multiplier = vld1q_dup_s32(&params->neon.b_multiplier);
+  const int32x4_t vright_shift = vld1q_dup_s32(&params->neon.right_shift);
+  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->neon.output_zero_point);
+  const uint8x16_t voutput_min = vld1q_dup_u8(&params->neon.output_min);
+  const uint8x16_t voutput_max = vld1q_dup_u8(&params->neon.output_max);
+
+  for (; n >= 16 * sizeof(uint8_t); n -= 16 * sizeof(uint8_t)) {
+    const uint8x8_t va01234567 = vld1_u8(input_a); input_a += 8;
+    const uint8x8_t vb01234567 = vld1_u8(input_b); input_b += 8;
+    const uint8x8_t va89ABCDEF = vld1_u8(input_a); input_a += 8;
+    const uint8x8_t vb89ABCDEF = vld1_u8(input_b); input_b += 8;
+
+    const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, va_zero_point));
+    const int16x8_t vxb01234567 = vreinterpretq_s16_u16(vsubl_u8(vb01234567, vb_zero_point));
+    const int16x8_t vxa89ABCDEF = vreinterpretq_s16_u16(vsubl_u8(va89ABCDEF, va_zero_point));
+    const int16x8_t vxb89ABCDEF = vreinterpretq_s16_u16(vsubl_u8(vb89ABCDEF, vb_zero_point));
+
+    int32x4_t vacc0123 = vmulq_s32(vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
+    int32x4_t vacc4567 = vmulq_s32(vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
+    int32x4_t vacc89AB = vmulq_s32(vmovl_s16(vget_low_s16(vxa89ABCDEF)), va_multiplier);
+    int32x4_t vaccCDEF = vmulq_s32(vmovl_s16(vget_high_s16(vxa89ABCDEF)), va_multiplier);
+
+    vacc0123 = vmlaq_s32(vacc0123, vmovl_s16(vget_low_s16(vxb01234567)), vb_multiplier);
+    vacc4567 = vmlaq_s32(vacc4567, vmovl_s16(vget_high_s16(vxb01234567)), vb_multiplier);
+    vacc89AB = vmlaq_s32(vacc89AB, vmovl_s16(vget_low_s16(vxb89ABCDEF)), vb_multiplier);
+    vaccCDEF = vmlaq_s32(vaccCDEF, vmovl_s16(vget_high_s16(vxb89ABCDEF)), vb_multiplier);
+
+    vacc0123 = vrshlq_s32(vacc0123, vright_shift);
+    vacc4567 = vrshlq_s32(vacc4567, vright_shift);
+    vacc89AB = vrshlq_s32(vacc89AB, vright_shift);
+    vaccCDEF = vrshlq_s32(vaccCDEF, vright_shift);
+
+    const int16x8_t vacc01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567)), voutput_zero_point);
+    const int16x8_t vacc89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF)), voutput_zero_point);
+
+    uint8x16_t vout0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc01234567), vqmovun_s16(vacc89ABCDEF));
+
+    vout0123456789ABCDEF = vmaxq_u8(vout0123456789ABCDEF, voutput_min);
+
+    vout0123456789ABCDEF = vminq_u8(vout0123456789ABCDEF, voutput_max);
+
+    vst1q_u8(output, vout0123456789ABCDEF); output += 16;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    do {
+      const uint8x8_t va01234567 = vld1_u8(input_a); input_a += 8;
+      const uint8x8_t vb01234567 = vld1_u8(input_b); input_b += 8;
+
+      const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, va_zero_point));
+      const int16x8_t vxb01234567 = vreinterpretq_s16_u16(vsubl_u8(vb01234567, vb_zero_point));
+
+      int32x4_t vacc0123 = vmulq_s32(vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
+      int32x4_t vacc4567 = vmulq_s32(vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
+
+      vacc0123 = vmlaq_s32(vacc0123, vmovl_s16(vget_low_s16(vxb01234567)), vb_multiplier);
+      vacc4567 = vmlaq_s32(vacc4567, vmovl_s16(vget_high_s16(vxb01234567)), vb_multiplier);
+
+      vacc0123 = vrshlq_s32(vacc0123, vright_shift);
+      vacc4567 = vrshlq_s32(vacc4567, vright_shift);
+
+      const int16x8_t vacc01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567)), voutput_zero_point);
+
+      uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
+      vout01234567 = vmax_u8(vout01234567, vget_low_u8(voutput_min));
+      vout01234567 = vmin_u8(vout01234567, vget_low_u8(voutput_max));
+
+      if XNN_LIKELY(n >= (8 * sizeof(uint8_t))) {
+        vst1_u8(output, vout01234567); output += 8;
+        n -= 8 * sizeof(uint8_t);
+      } else {
+        if (n & (4 * sizeof(uint8_t))) {
+          vst1_lane_u32(__builtin_assume_aligned(output, 1), vreinterpret_u32_u8(vout01234567), 0); output += 4;
+          vout01234567 = vext_u8(vout01234567, vout01234567, 4);
+        }
+        if (n & (2 * sizeof(uint8_t))) {
+          vst1_lane_u16(__builtin_assume_aligned(output, 1), vreinterpret_u16_u8(vout01234567), 0); output += 2;
+          vout01234567 = vext_u8(vout01234567, vout01234567, 2);
+        }
+        if (n & (1 * sizeof(uint8_t))) {
+          vst1_lane_u8(output, vout01234567, 0);
+        }
+        n = 0;
+      }
+    } while (n != 0);
+  }
+}
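
Note: every generated QU8 VADD variant in this change implements the same requantized addition; the files differ only in vector width and load pattern. A minimal scalar model of one output element (illustrative names, not library API; assumes 1 <= shift <= 31 and round-to-nearest behavior matching vrshlq_s32 with a negated shift):

    #include <stdint.h>

    // One element: subtract zero points, scale by fixed-point multipliers,
    // rounding-shift right, add the output zero point, clamp to [min, max].
    static inline uint8_t qu8_add_element(
        uint8_t a, uint8_t b,
        uint8_t a_zero_point, uint8_t b_zero_point,
        int32_t a_multiplier, int32_t b_multiplier, uint32_t shift,
        int16_t output_zero_point, uint8_t output_min, uint8_t output_max)
    {
      // Multipliers are below 2**21, so the accumulator fits in int32_t.
      const int32_t acc = ((int32_t) a - a_zero_point) * a_multiplier
                        + ((int32_t) b - b_zero_point) * b_multiplier;
      const int32_t shifted =
          (int32_t) (((int64_t) acc + (INT64_C(1) << (shift - 1))) >> shift);
      int32_t out = shifted + output_zero_point;
      if (out < output_min) out = output_min;
      if (out > output_max) out = output_max;
      return (uint8_t) out;
    }
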
diff --git a/src/qu8-vadd/gen/minmax-neon-ld64-x8.c b/src/qu8-vadd/gen/minmax-neon-ld64-x8.c
new file mode 100644
index 0000000..3ff0fb1
--- /dev/null
+++ b/src/qu8-vadd/gen/minmax-neon-ld64-x8.c
@@ -0,0 +1,95 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-vadd/neon-ld64.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/vadd.h>
+
+
+void xnn_qu8_vadd_minmax_ukernel__neon_ld64_x8(
+    size_t n,
+    const uint8_t* input_a,
+    const uint8_t* input_b,
+    uint8_t* output,
+    const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  const uint8x8_t va_zero_point = vld1_dup_u8(&params->neon.a_zero_point);
+  const uint8x8_t vb_zero_point = vld1_dup_u8(&params->neon.b_zero_point);
+  const int32x4_t va_multiplier = vld1q_dup_s32(&params->neon.a_multiplier);
+  const int32x4_t vb_multiplier = vld1q_dup_s32(&params->neon.b_multiplier);
+  const int32x4_t vright_shift = vld1q_dup_s32(&params->neon.right_shift);
+  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->neon.output_zero_point);
+  const uint8x16_t voutput_min = vld1q_dup_u8(&params->neon.output_min);
+  const uint8x16_t voutput_max = vld1q_dup_u8(&params->neon.output_max);
+
+  for (; n >= 8 * sizeof(uint8_t); n -= 8 * sizeof(uint8_t)) {
+    const uint8x8_t va01234567 = vld1_u8(input_a); input_a += 8;
+    const uint8x8_t vb01234567 = vld1_u8(input_b); input_b += 8;
+
+    const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, va_zero_point));
+    const int16x8_t vxb01234567 = vreinterpretq_s16_u16(vsubl_u8(vb01234567, vb_zero_point));
+
+    int32x4_t vacc0123 = vmulq_s32(vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
+    int32x4_t vacc4567 = vmulq_s32(vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
+
+    vacc0123 = vmlaq_s32(vacc0123, vmovl_s16(vget_low_s16(vxb01234567)), vb_multiplier);
+    vacc4567 = vmlaq_s32(vacc4567, vmovl_s16(vget_high_s16(vxb01234567)), vb_multiplier);
+
+    vacc0123 = vrshlq_s32(vacc0123, vright_shift);
+    vacc4567 = vrshlq_s32(vacc4567, vright_shift);
+
+    const int16x8_t vacc01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567)), voutput_zero_point);
+
+    uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
+
+    vout01234567 = vmax_u8(vout01234567, vget_low_u8(voutput_min));
+
+    vout01234567 = vmin_u8(vout01234567, vget_low_u8(voutput_max));
+
+    vst1_u8(output, vout01234567); output += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    {
+      const uint8x8_t va01234567 = vld1_u8(input_a);
+      const uint8x8_t vb01234567 = vld1_u8(input_b);
+
+      const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, va_zero_point));
+      const int16x8_t vxb01234567 = vreinterpretq_s16_u16(vsubl_u8(vb01234567, vb_zero_point));
+
+      int32x4_t vacc0123 = vmulq_s32(vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
+      int32x4_t vacc4567 = vmulq_s32(vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
+
+      vacc0123 = vmlaq_s32(vacc0123, vmovl_s16(vget_low_s16(vxb01234567)), vb_multiplier);
+      vacc4567 = vmlaq_s32(vacc4567, vmovl_s16(vget_high_s16(vxb01234567)), vb_multiplier);
+
+      vacc0123 = vrshlq_s32(vacc0123, vright_shift);
+      vacc4567 = vrshlq_s32(vacc4567, vright_shift);
+
+      const int16x8_t vacc01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567)), voutput_zero_point);
+
+      uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
+      vout01234567 = vmax_u8(vout01234567, vget_low_u8(voutput_min));
+      vout01234567 = vmin_u8(vout01234567, vget_low_u8(voutput_max));
+
+      if (n & (4 * sizeof(uint8_t))) {
+        vst1_lane_u32(__builtin_assume_aligned(output, 1), vreinterpret_u32_u8(vout01234567), 0); output += 4;
+        vout01234567 = vext_u8(vout01234567, vout01234567, 4);
+      }
+      if (n & (2 * sizeof(uint8_t))) {
+        vst1_lane_u16(__builtin_assume_aligned(output, 1), vreinterpret_u16_u8(vout01234567), 0); output += 2;
+        vout01234567 = vext_u8(vout01234567, vout01234567, 2);
+      }
+      if (n & (1 * sizeof(uint8_t))) {
+        vst1_lane_u8(output, vout01234567, 0);
+      }
+    }
+  }
+}
diff --git a/src/qu8-vadd/gen/minmax-scalar-x1.c b/src/qu8-vadd/gen/minmax-scalar-x1.c
new file mode 100644
index 0000000..a2684e0
--- /dev/null
+++ b/src/qu8-vadd/gen/minmax-scalar-x1.c
@@ -0,0 +1,44 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-vadd/scalar.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xnnpack/math.h>
+#include <xnnpack/vadd.h>
+
+
+void xnn_qu8_vadd_minmax_ukernel__scalar_x1(
+    size_t n,
+    const uint8_t* input_a,
+    const uint8_t* input_b,
+    uint8_t* output,
+    const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  const int32_t vbias = params->scalar.bias;
+  const int32_t va_multiplier = params->scalar.a_multiplier;
+  const int32_t vb_multiplier = params->scalar.b_multiplier;
+  const int32_t vrounding = params->scalar.rounding;
+  const uint32_t vshift = params->scalar.shift;
+  const int32_t voutput_min_less_zero_point = params->scalar.output_min_less_zero_point;
+  const int32_t voutput_max_less_zero_point = params->scalar.output_max_less_zero_point;
+  const int32_t voutput_zero_point = params->scalar.output_zero_point;
+
+  do {
+    const int32_t va = *input_a++;
+    const int32_t vb = *input_b++;
+    const int32_t vacc = vbias + va * va_multiplier + vb * vb_multiplier;
+
+    int32_t vout = asr_s32(vacc + vrounding, vshift);
+    vout = math_max_s32(vout, voutput_min_less_zero_point);
+    vout = math_min_s32(vout, voutput_max_less_zero_point);
+    *output++ = (uint8_t) (vout + voutput_zero_point);
+
+    n -= sizeof(uint8_t);
+  } while (n != 0);
+}
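
Note: the scalar kernel consumes precomputed bias and rounding fields instead of raw zero points. Equating vbias + va * va_multiplier + vb * vb_multiplier against the intended (va - a_zero_point) * va_multiplier + (vb - b_zero_point) * vb_multiplier pins down what the params initializer must produce; the initializer itself is outside this section, so the sketch below is an inference, not quoted code:

    #include <stdint.h>

    // Inferred derivation of the scalar params fields (an assumption,
    // not part of this diff).
    static void infer_scalar_params(
        uint8_t a_zero_point, uint8_t b_zero_point,
        int32_t a_multiplier, int32_t b_multiplier, uint32_t shift,
        int32_t* bias, int32_t* rounding)
    {
      // Folding both zero-point corrections into one constant lets the kernel
      // use raw uint8_t inputs: bias + a*ma + b*mb == (a - za)*ma + (b - zb)*mb.
      *bias = -((int32_t) a_zero_point * a_multiplier +
                (int32_t) b_zero_point * b_multiplier);
      // Half-ulp that makes asr_s32(acc + rounding, shift) round to nearest.
      *rounding = INT32_C(1) << (shift - 1);
    }
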
diff --git a/src/qu8-vadd/gen/minmax-scalar-x2.c b/src/qu8-vadd/gen/minmax-scalar-x2.c
new file mode 100644
index 0000000..00fd437
--- /dev/null
+++ b/src/qu8-vadd/gen/minmax-scalar-x2.c
@@ -0,0 +1,72 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-vadd/scalar.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xnnpack/math.h>
+#include <xnnpack/vadd.h>
+
+
+void xnn_qu8_vadd_minmax_ukernel__scalar_x2(
+    size_t n,
+    const uint8_t* input_a,
+    const uint8_t* input_b,
+    uint8_t* output,
+    const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  const int32_t vbias = params->scalar.bias;
+  const int32_t va_multiplier = params->scalar.a_multiplier;
+  const int32_t vb_multiplier = params->scalar.b_multiplier;
+  const int32_t vrounding = params->scalar.rounding;
+  const uint32_t vshift = params->scalar.shift;
+  const int32_t voutput_min_less_zero_point = params->scalar.output_min_less_zero_point;
+  const int32_t voutput_max_less_zero_point = params->scalar.output_max_less_zero_point;
+  const int32_t voutput_zero_point = params->scalar.output_zero_point;
+
+  for (; n >= 2 * sizeof(uint8_t); n -= 2 * sizeof(uint8_t)) {
+    const int32_t va0 = input_a[0];
+    const int32_t va1 = input_a[1];
+    input_a += 2;
+
+    const int32_t vb0 = input_b[0];
+    int32_t vacc0 = vbias + va0 * va_multiplier;
+    const int32_t vb1 = input_b[1];
+    int32_t vacc1 = vbias + va1 * va_multiplier;
+    input_b += 2;
+
+    vacc0 += vb0 * vb_multiplier;
+    vacc1 += vb1 * vb_multiplier;
+
+    int32_t vout0 = asr_s32(vacc0 + vrounding, vshift);
+    int32_t vout1 = asr_s32(vacc1 + vrounding, vshift);
+
+    vout0 = math_max_s32(vout0, voutput_min_less_zero_point);
+    vout1 = math_max_s32(vout1, voutput_min_less_zero_point);
+
+    vout0 = math_min_s32(vout0, voutput_max_less_zero_point);
+    vout1 = math_min_s32(vout1, voutput_max_less_zero_point);
+
+    vout0 += voutput_zero_point;
+    vout1 += voutput_zero_point;
+
+    output[0] = (uint8_t) vout0;
+    output[1] = (uint8_t) vout1;
+    output += 2;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    const int32_t va = *input_a;
+    const int32_t vb = *input_b;
+    const int32_t vacc = vbias + va * va_multiplier + vb * vb_multiplier;
+
+    int32_t vout = asr_s32(vacc + vrounding, vshift);
+    vout = math_max_s32(vout, voutput_min_less_zero_point);
+    vout = math_min_s32(vout, voutput_max_less_zero_point);
+    *output++ = (uint8_t) (vout + voutput_zero_point);
+  }
+}
diff --git a/src/qu8-vadd/gen/minmax-scalar-x4.c b/src/qu8-vadd/gen/minmax-scalar-x4.c
new file mode 100644
index 0000000..5f9237e
--- /dev/null
+++ b/src/qu8-vadd/gen/minmax-scalar-x4.c
@@ -0,0 +1,94 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-vadd/scalar.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xnnpack/math.h>
+#include <xnnpack/vadd.h>
+
+
+void xnn_qu8_vadd_minmax_ukernel__scalar_x4(
+    size_t n,
+    const uint8_t* input_a,
+    const uint8_t* input_b,
+    uint8_t* output,
+    const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  const int32_t vbias = params->scalar.bias;
+  const int32_t va_multiplier = params->scalar.a_multiplier;
+  const int32_t vb_multiplier = params->scalar.b_multiplier;
+  const int32_t vrounding = params->scalar.rounding;
+  const uint32_t vshift = params->scalar.shift;
+  const int32_t voutput_min_less_zero_point = params->scalar.output_min_less_zero_point;
+  const int32_t voutput_max_less_zero_point = params->scalar.output_max_less_zero_point;
+  const int32_t voutput_zero_point = params->scalar.output_zero_point;
+
+  for (; n >= 4 * sizeof(uint8_t); n -= 4 * sizeof(uint8_t)) {
+    const int32_t va0 = input_a[0];
+    const int32_t va1 = input_a[1];
+    const int32_t va2 = input_a[2];
+    const int32_t va3 = input_a[3];
+    input_a += 4;
+
+    const int32_t vb0 = input_b[0];
+    int32_t vacc0 = vbias + va0 * va_multiplier;
+    const int32_t vb1 = input_b[1];
+    int32_t vacc1 = vbias + va1 * va_multiplier;
+    const int32_t vb2 = input_b[2];
+    int32_t vacc2 = vbias + va2 * va_multiplier;
+    const int32_t vb3 = input_b[3];
+    int32_t vacc3 = vbias + va3 * va_multiplier;
+    input_b += 4;
+
+    vacc0 += vb0 * vb_multiplier;
+    vacc1 += vb1 * vb_multiplier;
+    vacc2 += vb2 * vb_multiplier;
+    vacc3 += vb3 * vb_multiplier;
+
+    int32_t vout0 = asr_s32(vacc0 + vrounding, vshift);
+    int32_t vout1 = asr_s32(vacc1 + vrounding, vshift);
+    int32_t vout2 = asr_s32(vacc2 + vrounding, vshift);
+    int32_t vout3 = asr_s32(vacc3 + vrounding, vshift);
+
+    vout0 = math_max_s32(vout0, voutput_min_less_zero_point);
+    vout1 = math_max_s32(vout1, voutput_min_less_zero_point);
+    vout2 = math_max_s32(vout2, voutput_min_less_zero_point);
+    vout3 = math_max_s32(vout3, voutput_min_less_zero_point);
+
+    vout0 = math_min_s32(vout0, voutput_max_less_zero_point);
+    vout1 = math_min_s32(vout1, voutput_max_less_zero_point);
+    vout2 = math_min_s32(vout2, voutput_max_less_zero_point);
+    vout3 = math_min_s32(vout3, voutput_max_less_zero_point);
+
+    vout0 += voutput_zero_point;
+    vout1 += voutput_zero_point;
+    vout2 += voutput_zero_point;
+    vout3 += voutput_zero_point;
+
+    output[0] = (uint8_t) vout0;
+    output[1] = (uint8_t) vout1;
+    output[2] = (uint8_t) vout2;
+    output[3] = (uint8_t) vout3;
+    output += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    do {
+      const int32_t va = *input_a++;
+      const int32_t vb = *input_b++;
+      const int32_t vacc = vbias + va * va_multiplier + vb * vb_multiplier;
+
+      int32_t vout = asr_s32(vacc + vrounding, vshift);
+      vout = math_max_s32(vout, voutput_min_less_zero_point);
+      vout = math_min_s32(vout, voutput_max_less_zero_point);
+      *output++ = (uint8_t) (vout + voutput_zero_point);
+
+      n -= sizeof(uint8_t);
+    } while (n != 0);
+  }
+}
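
Note: a hypothetical driver showing the calling convention shared by all of these kernels. The wrapper below is illustrative; it assumes params were already initialized, and n must be nonzero and is measured in bytes:

    #include <stddef.h>
    #include <stdint.h>

    #include <xnnpack/vadd.h>

    // Element-wise quantized addition of len uint8_t values; for uint8_t
    // the byte count and the element count coincide.
    static void add_qu8(
        size_t len, const uint8_t* a, const uint8_t* b, uint8_t* y,
        const union xnn_qu8_add_minmax_params* params)
    {
      xnn_qu8_vadd_minmax_ukernel__scalar_x4(len * sizeof(uint8_t), a, b, y, params);
    }
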
diff --git a/src/qu8-vadd/gen/minmax-sse2-mul16-ld64-x16.c b/src/qu8-vadd/gen/minmax-sse2-mul16-ld64-x16.c
new file mode 100644
index 0000000..7deb623
--- /dev/null
+++ b/src/qu8-vadd/gen/minmax-sse2-mul16-ld64-x16.c
@@ -0,0 +1,149 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-vadd/sse-mul16-ld64.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <emmintrin.h>
+
+#include <xnnpack/vadd.h>
+
+
+void xnn_qu8_vadd_minmax_ukernel__sse2_mul16_ld64_x16(
+    size_t n,
+    const uint8_t* input_a,
+    const uint8_t* input_b,
+    uint8_t* output,
+    const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  const __m128i vbias = _mm_load_si128((const __m128i*) params->sse2.bias);
+  const __m128i va_multiplier_lo = _mm_load_si128((const __m128i*) params->sse2.a_multiplier_lo);
+  const __m128i va_multiplier_hi = _mm_load_si128((const __m128i*) params->sse2.a_multiplier_hi);
+  const __m128i vb_multiplier_lo = _mm_load_si128((const __m128i*) params->sse2.b_multiplier_lo);
+  const __m128i vb_multiplier_hi = _mm_load_si128((const __m128i*) params->sse2.b_multiplier_hi);
+  const __m128i vrounding = _mm_load_si128((const __m128i*) params->sse2.rounding);
+  const __m128i vshift = _mm_cvtsi32_si128((int) params->sse2.shift);
+  const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
+  const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
+  const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
+
+  for (; n >= 16 * sizeof(uint8_t); n -= 16 * sizeof(uint8_t)) {
+    __m128i va01234567 = _mm_loadl_epi64((const __m128i*) input_a);
+    __m128i vb01234567 = _mm_loadl_epi64((const __m128i*) input_b);
+    __m128i va89ABCDEF = _mm_loadl_epi64((const __m128i*) (input_a + 8));
+    __m128i vb89ABCDEF = _mm_loadl_epi64((const __m128i*) (input_b + 8));
+    input_a += 16;
+    input_b += 16;
+
+    const __m128i vzero = _mm_setzero_si128();
+    va01234567 = _mm_unpacklo_epi8(va01234567, vzero);
+    vb01234567 = _mm_unpacklo_epi8(vb01234567, vzero);
+    va89ABCDEF = _mm_unpacklo_epi8(va89ABCDEF, vzero);
+    vb89ABCDEF = _mm_unpacklo_epi8(vb89ABCDEF, vzero);
+
+    __m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
+    __m128i vbprod01234567hi = _mm_mulhi_epu16(vb01234567, vb_multiplier_lo);
+    const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
+    const __m128i vbprod01234567lo = _mm_mullo_epi16(vb01234567, vb_multiplier_lo);
+    __m128i vaprod89ABCDEFhi = _mm_mulhi_epu16(va89ABCDEF, va_multiplier_lo);
+    __m128i vbprod89ABCDEFhi = _mm_mulhi_epu16(vb89ABCDEF, vb_multiplier_lo);
+    const __m128i vaprod89ABCDEFlo = _mm_mullo_epi16(va89ABCDEF, va_multiplier_lo);
+    const __m128i vbprod89ABCDEFlo = _mm_mullo_epi16(vb89ABCDEF, vb_multiplier_lo);
+
+    vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
+    vbprod01234567hi = _mm_add_epi16(vbprod01234567hi, _mm_mullo_epi16(vb01234567, vb_multiplier_hi));
+    vaprod89ABCDEFhi = _mm_add_epi16(vaprod89ABCDEFhi, _mm_mullo_epi16(va89ABCDEF, va_multiplier_hi));
+    vbprod89ABCDEFhi = _mm_add_epi16(vbprod89ABCDEFhi, _mm_mullo_epi16(vb89ABCDEF, vb_multiplier_hi));
+
+
+    __m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
+    __m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
+    __m128i vacc89AB = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod89ABCDEFlo, vaprod89ABCDEFhi));
+    __m128i vaccCDEF = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod89ABCDEFlo, vaprod89ABCDEFhi));
+
+    vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vbprod01234567lo, vbprod01234567hi));
+    vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vbprod01234567lo, vbprod01234567hi));
+    vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vbprod89ABCDEFlo, vbprod89ABCDEFhi));
+    vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vbprod89ABCDEFlo, vbprod89ABCDEFhi));
+
+    vacc0123 = _mm_sra_epi32(_mm_add_epi32(vacc0123, vrounding), vshift);
+    vacc4567 = _mm_sra_epi32(_mm_add_epi32(vacc4567, vrounding), vshift);
+    vacc89AB = _mm_sra_epi32(_mm_add_epi32(vacc89AB, vrounding), vshift);
+    vaccCDEF = _mm_sra_epi32(_mm_add_epi32(vaccCDEF, vrounding), vshift);
+
+    __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
+    __m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
+
+
+    __m128i vout0123456789ABCDEF = _mm_packus_epi16(vout01234567, vout89ABCDEF);
+
+    vout0123456789ABCDEF = _mm_max_epu8(vout0123456789ABCDEF, voutput_min);
+
+    vout0123456789ABCDEF = _mm_min_epu8(vout0123456789ABCDEF, voutput_max);
+
+    _mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
+    output += 16;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    do {
+      __m128i va01234567 = _mm_loadl_epi64((const __m128i*) input_a);
+      __m128i vb01234567 = _mm_loadl_epi64((const __m128i*) input_b);
+      input_a += 8;
+      input_b += 8;
+
+      const __m128i vzero = _mm_setzero_si128();
+      va01234567 = _mm_unpacklo_epi8(va01234567, vzero);
+      vb01234567 = _mm_unpacklo_epi8(vb01234567, vzero);
+
+      __m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
+      __m128i vbprod01234567hi = _mm_mulhi_epu16(vb01234567, vb_multiplier_lo);
+      const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
+      const __m128i vbprod01234567lo = _mm_mullo_epi16(vb01234567, vb_multiplier_lo);
+
+      vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
+      vbprod01234567hi = _mm_add_epi16(vbprod01234567hi, _mm_mullo_epi16(vb01234567, vb_multiplier_hi));
+
+
+      __m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
+      __m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
+
+      vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vbprod01234567lo, vbprod01234567hi));
+      vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vbprod01234567lo, vbprod01234567hi));
+
+      vacc0123 = _mm_sra_epi32(_mm_add_epi32(vacc0123, vrounding), vshift);
+      vacc4567 = _mm_sra_epi32(_mm_add_epi32(vacc4567, vrounding), vshift);
+
+      __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
+
+      __m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
+      vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
+      vout0123456701234567 = _mm_min_epu8(vout0123456701234567, voutput_max);
+
+      if XNN_LIKELY(n >= (8 * sizeof(uint8_t))) {
+        _mm_storel_epi64((__m128i*) output, vout0123456701234567);
+        output += 8;
+        n -= 8 * sizeof(uint8_t);
+      } else {
+        if (n & (4 * sizeof(uint8_t))) {
+          *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout0123456701234567);
+          vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
+          output += 4;
+        }
+        if (n & (2 * sizeof(uint8_t))) {
+          *((uint16_t*) output) = (uint16_t) _mm_cvtsi128_si32(vout0123456701234567);
+          vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
+          output += 2;
+        }
+        if (n & (1 * sizeof(uint8_t))) {
+          *output = (uint8_t) _mm_cvtsi128_si32(vout0123456701234567);
+        }
+        n = 0;
+      }
+    } while (n != 0);
+  }
+}
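
Note: the mul16 in these SSE2 file names refers to building the 32-bit input-times-multiplier product out of 16x16-bit pieces, since SSE2 has no unsigned 32-bit multiply. The multiplier is split into 16-bit halves (the a_multiplier_lo/a_multiplier_hi params above), and _mm_mullo_epi16/_mm_mulhi_epu16 reassemble the product. A scalar model of the identity (helper name is illustrative):

    #include <stdint.h>

    // x * m for x < 2**16, via x*m = x*m_lo + ((x*m_hi) << 16), where
    // m = (m_hi << 16) | m_lo. In the kernels x <= 255 and m < 2**21,
    // so the 32-bit result is the exact product.
    static inline uint32_t mul16_model(uint16_t x, uint32_t m) {
      const uint32_t m_lo = m & 0xFFFF;
      const uint32_t m_hi = m >> 16;
      const uint16_t prod_lo = (uint16_t) ((uint32_t) x * m_lo);   // _mm_mullo_epi16
      const uint16_t prod_hi = (uint16_t)                          // _mm_mulhi_epu16
          ((((uint32_t) x * m_lo) >> 16) + (uint32_t) x * m_hi);   //  + _mm_mullo_epi16
      return ((uint32_t) prod_hi << 16) | prod_lo;                 // interleave, as the
    }                                                              // _mm_unpack*_epi16 do
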
diff --git a/src/qu8-vadd/gen/minmax-sse2-mul16-ld64-x8.c b/src/qu8-vadd/gen/minmax-sse2-mul16-ld64-x8.c
new file mode 100644
index 0000000..c8095f3
--- /dev/null
+++ b/src/qu8-vadd/gen/minmax-sse2-mul16-ld64-x8.c
@@ -0,0 +1,123 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-vadd/sse-mul16-ld64.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <emmintrin.h>
+
+#include <xnnpack/vadd.h>
+
+
+void xnn_qu8_vadd_minmax_ukernel__sse2_mul16_ld64_x8(
+    size_t n,
+    const uint8_t* input_a,
+    const uint8_t* input_b,
+    uint8_t* output,
+    const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  const __m128i vbias = _mm_load_si128((const __m128i*) params->sse2.bias);
+  const __m128i va_multiplier_lo = _mm_load_si128((const __m128i*) params->sse2.a_multiplier_lo);
+  const __m128i va_multiplier_hi = _mm_load_si128((const __m128i*) params->sse2.a_multiplier_hi);
+  const __m128i vb_multiplier_lo = _mm_load_si128((const __m128i*) params->sse2.b_multiplier_lo);
+  const __m128i vb_multiplier_hi = _mm_load_si128((const __m128i*) params->sse2.b_multiplier_hi);
+  const __m128i vrounding = _mm_load_si128((const __m128i*) params->sse2.rounding);
+  const __m128i vshift = _mm_cvtsi32_si128((int) params->sse2.shift);
+  const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
+  const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
+  const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
+
+  for (; n >= 8 * sizeof(uint8_t); n -= 8 * sizeof(uint8_t)) {
+    __m128i va01234567 = _mm_loadl_epi64((const __m128i*) input_a);
+    __m128i vb01234567 = _mm_loadl_epi64((const __m128i*) input_b);
+    input_a += 8;
+    input_b += 8;
+
+    const __m128i vzero = _mm_setzero_si128();
+    va01234567 = _mm_unpacklo_epi8(va01234567, vzero);
+    vb01234567 = _mm_unpacklo_epi8(vb01234567, vzero);
+
+    __m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
+    __m128i vbprod01234567hi = _mm_mulhi_epu16(vb01234567, vb_multiplier_lo);
+    const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
+    const __m128i vbprod01234567lo = _mm_mullo_epi16(vb01234567, vb_multiplier_lo);
+
+    vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
+    vbprod01234567hi = _mm_add_epi16(vbprod01234567hi, _mm_mullo_epi16(vb01234567, vb_multiplier_hi));
+
+
+    __m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
+    __m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
+
+    vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vbprod01234567lo, vbprod01234567hi));
+    vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vbprod01234567lo, vbprod01234567hi));
+
+    vacc0123 = _mm_sra_epi32(_mm_add_epi32(vacc0123, vrounding), vshift);
+    vacc4567 = _mm_sra_epi32(_mm_add_epi32(vacc4567, vrounding), vshift);
+
+    __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
+
+
+    __m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
+
+    vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
+
+    vout0123456701234567 = _mm_min_epu8(vout0123456701234567, voutput_max);
+
+    _mm_storel_epi64((__m128i*) output, vout0123456701234567);
+    output += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    {
+      __m128i va01234567 = _mm_loadl_epi64((const __m128i*) input_a);
+      __m128i vb01234567 = _mm_loadl_epi64((const __m128i*) input_b);
+
+      const __m128i vzero = _mm_setzero_si128();
+      va01234567 = _mm_unpacklo_epi8(va01234567, vzero);
+      vb01234567 = _mm_unpacklo_epi8(vb01234567, vzero);
+
+      __m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
+      __m128i vbprod01234567hi = _mm_mulhi_epu16(vb01234567, vb_multiplier_lo);
+      const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
+      const __m128i vbprod01234567lo = _mm_mullo_epi16(vb01234567, vb_multiplier_lo);
+
+      vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
+      vbprod01234567hi = _mm_add_epi16(vbprod01234567hi, _mm_mullo_epi16(vb01234567, vb_multiplier_hi));
+
+
+      __m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
+      __m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
+
+      vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vbprod01234567lo, vbprod01234567hi));
+      vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vbprod01234567lo, vbprod01234567hi));
+
+      vacc0123 = _mm_sra_epi32(_mm_add_epi32(vacc0123, vrounding), vshift);
+      vacc4567 = _mm_sra_epi32(_mm_add_epi32(vacc4567, vrounding), vshift);
+
+      __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
+
+      __m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
+      vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
+      vout0123456701234567 = _mm_min_epu8(vout0123456701234567, voutput_max);
+
+      if (n & (4 * sizeof(uint8_t))) {
+        *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout0123456701234567);
+        vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
+        output += 4;
+      }
+      if (n & (2 * sizeof(uint8_t))) {
+        *((uint16_t*) output) = (uint16_t) _mm_cvtsi128_si32(vout0123456701234567);
+        vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
+        output += 2;
+      }
+      if (n & (1 * sizeof(uint8_t))) {
+        *output = (uint8_t) _mm_cvtsi128_si32(vout0123456701234567);
+      }
+    }
+  }
+}
diff --git a/src/qu8-vadd/gen/minmax-wasmsimd-x16.c b/src/qu8-vadd/gen/minmax-wasmsimd-x16.c
new file mode 100644
index 0000000..563534c
--- /dev/null
+++ b/src/qu8-vadd/gen/minmax-wasmsimd-x16.c
@@ -0,0 +1,112 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-vadd/wasmsimd.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/vadd.h>
+
+
+void xnn_qu8_vadd_minmax_ukernel__wasmsimd_x16(
+    size_t n,
+    const uint8_t* input_a,
+    const uint8_t* input_b,
+    uint8_t* output,
+    const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  const v128_t vbias = wasm_v128_load(params->wasmsimd.bias);
+  const v128_t va_multiplier = wasm_v128_load(params->wasmsimd.a_multiplier);
+  const v128_t vb_multiplier = wasm_v128_load(params->wasmsimd.b_multiplier);
+  const v128_t vrounding = wasm_v128_load(params->wasmsimd.rounding);
+  const int32_t vshift = params->wasmsimd.shift;
+  const v128_t voutput_zero_point = wasm_v128_load(params->wasmsimd.output_zero_point);
+  const v128_t voutput_min = wasm_v128_load(params->wasmsimd.output_min);
+  const v128_t voutput_max = wasm_v128_load(params->wasmsimd.output_max);
+
+  for (; n >= 16 * sizeof(uint8_t); n -= 16 * sizeof(uint8_t)) {
+    const v128_t va01234567 = wasm_u16x8_load8x8(input_a);
+    const v128_t vb01234567 = wasm_u16x8_load8x8(input_b);
+    const v128_t va89ABCDEF = wasm_u16x8_load8x8(input_a + 8);
+    const v128_t vb89ABCDEF = wasm_u16x8_load8x8(input_b + 8);
+    input_a += 16;
+    input_b += 16;
+
+    v128_t vacc0123 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_u32x4_extend_low_u16x8(va01234567), va_multiplier));
+    v128_t vacc4567 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_u32x4_extend_high_u16x8(va01234567), va_multiplier));
+    v128_t vacc89AB = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_u32x4_extend_low_u16x8(va89ABCDEF), va_multiplier));
+    v128_t vaccCDEF = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_u32x4_extend_high_u16x8(va89ABCDEF), va_multiplier));
+
+    vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_mul(wasm_u32x4_extend_low_u16x8(vb01234567), vb_multiplier));
+    vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_mul(wasm_u32x4_extend_high_u16x8(vb01234567), vb_multiplier));
+    vacc89AB = wasm_i32x4_add(vacc89AB, wasm_i32x4_mul(wasm_u32x4_extend_low_u16x8(vb89ABCDEF), vb_multiplier));
+    vaccCDEF = wasm_i32x4_add(vaccCDEF, wasm_i32x4_mul(wasm_u32x4_extend_high_u16x8(vb89ABCDEF), vb_multiplier));
+
+    vacc0123 = wasm_i32x4_shr(wasm_i32x4_add(vacc0123, vrounding), vshift);
+    vacc4567 = wasm_i32x4_shr(wasm_i32x4_add(vacc4567, vrounding), vshift);
+    vacc89AB = wasm_i32x4_shr(wasm_i32x4_add(vacc89AB, vrounding), vshift);
+    vaccCDEF = wasm_i32x4_shr(wasm_i32x4_add(vaccCDEF, vrounding), vshift);
+
+    v128_t vout01234567 = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vacc0123, vacc4567), voutput_zero_point);
+    v128_t vout89ABCDEF = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vacc89AB, vaccCDEF), voutput_zero_point);
+
+    v128_t vout0123456789ABCDEF = wasm_u8x16_narrow_i16x8(vout01234567, vout89ABCDEF);
+
+    vout0123456789ABCDEF = wasm_u8x16_max(vout0123456789ABCDEF, voutput_min);
+
+    vout0123456789ABCDEF = wasm_u8x16_min(vout0123456789ABCDEF, voutput_max);
+
+    wasm_v128_store(output, vout0123456789ABCDEF);
+    output += 16;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    do {
+      const v128_t va01234567 = wasm_u16x8_load8x8(input_a);
+      const v128_t vb01234567 = wasm_u16x8_load8x8(input_b);
+      input_a += 8;
+      input_b += 8;
+
+      v128_t vacc0123 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_u32x4_extend_low_u16x8(va01234567), va_multiplier));
+      v128_t vacc4567 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_u32x4_extend_high_u16x8(va01234567), va_multiplier));
+
+      vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_mul(wasm_u32x4_extend_low_u16x8(vb01234567), vb_multiplier));
+      vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_mul(wasm_u32x4_extend_high_u16x8(vb01234567), vb_multiplier));
+
+      vacc0123 = wasm_i32x4_shr(wasm_i32x4_add(vacc0123, vrounding), vshift);
+      vacc4567 = wasm_i32x4_shr(wasm_i32x4_add(vacc4567, vrounding), vshift);
+
+      v128_t vout01234567 = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vacc0123, vacc4567), voutput_zero_point);
+
+      v128_t vout0123456701234567 = wasm_u8x16_narrow_i16x8(vout01234567, vout01234567);
+      vout0123456701234567 = wasm_u8x16_max(vout0123456701234567, voutput_min);
+      vout0123456701234567 = wasm_u8x16_min(vout0123456701234567, voutput_max);
+
+      if XNN_LIKELY(n >= (8 * sizeof(uint8_t))) {
+        *((double*) output) = wasm_f64x2_extract_lane(vout0123456701234567, 0);
+        output += 8;
+        n -= 8 * sizeof(uint8_t);
+      } else {
+        if (n & (4 * sizeof(uint8_t))) {
+          *((uint32_t*) output) = (uint32_t) wasm_i32x4_extract_lane(vout0123456701234567, 0);
+          vout0123456701234567 = wasm_u64x2_shr(vout0123456701234567, 32);
+          output += 4;
+        }
+        if (n & (2 * sizeof(uint8_t))) {
+          *((uint16_t*) output) = (uint16_t) wasm_i16x8_extract_lane(vout0123456701234567, 0);
+          vout0123456701234567 = wasm_u32x4_shr(vout0123456701234567, 16);
+          output += 2;
+        }
+        if (n & (1 * sizeof(uint8_t))) {
+          *output = (uint8_t) wasm_i8x16_extract_lane(vout0123456701234567, 0);
+        }
+        n = 0;
+      }
+    } while (n != 0);
+  }
+}
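
Note: the 8-byte stores above extract lane 0 of the v128 as a double and write it through a double pointer; only the bit pattern is copied, so no floating-point conversion is involved. A strict-aliasing-safe scalar equivalent (the memcpy form is my restatement; the generated code stores through the pointer directly):

    #include <stdint.h>
    #include <string.h>

    // Store 8 packed bytes, here modeled as a uint64_t lane.
    static inline void store_low_8_bytes(uint8_t* output, uint64_t lane0) {
      memcpy(output, &lane0, sizeof(lane0));  // byte-exact, no alignment requirement
    }
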
diff --git a/src/qu8-vadd/gen/minmax-wasmsimd-x8.c b/src/qu8-vadd/gen/minmax-wasmsimd-x8.c
new file mode 100644
index 0000000..df5b45f
--- /dev/null
+++ b/src/qu8-vadd/gen/minmax-wasmsimd-x8.c
@@ -0,0 +1,94 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-vadd/wasmsimd.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/vadd.h>
+
+
+void xnn_qu8_vadd_minmax_ukernel__wasmsimd_x8(
+    size_t n,
+    const uint8_t* input_a,
+    const uint8_t* input_b,
+    uint8_t* output,
+    const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  const v128_t vbias = wasm_v128_load(params->wasmsimd.bias);
+  const v128_t va_multiplier = wasm_v128_load(params->wasmsimd.a_multiplier);
+  const v128_t vb_multiplier = wasm_v128_load(params->wasmsimd.b_multiplier);
+  const v128_t vrounding = wasm_v128_load(params->wasmsimd.rounding);
+  const int32_t vshift = params->wasmsimd.shift;
+  const v128_t voutput_zero_point = wasm_v128_load(params->wasmsimd.output_zero_point);
+  const v128_t voutput_min = wasm_v128_load(params->wasmsimd.output_min);
+  const v128_t voutput_max = wasm_v128_load(params->wasmsimd.output_max);
+
+  for (; n >= 8 * sizeof(uint8_t); n -= 8 * sizeof(uint8_t)) {
+    const v128_t va01234567 = wasm_u16x8_load8x8(input_a);
+    const v128_t vb01234567 = wasm_u16x8_load8x8(input_b);
+    input_a += 8;
+    input_b += 8;
+
+    v128_t vacc0123 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_u32x4_extend_low_u16x8(va01234567), va_multiplier));
+    v128_t vacc4567 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_u32x4_extend_high_u16x8(va01234567), va_multiplier));
+
+    vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_mul(wasm_u32x4_extend_low_u16x8(vb01234567), vb_multiplier));
+    vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_mul(wasm_u32x4_extend_high_u16x8(vb01234567), vb_multiplier));
+
+    vacc0123 = wasm_i32x4_shr(wasm_i32x4_add(vacc0123, vrounding), vshift);
+    vacc4567 = wasm_i32x4_shr(wasm_i32x4_add(vacc4567, vrounding), vshift);
+
+    v128_t vout01234567 = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vacc0123, vacc4567), voutput_zero_point);
+
+    v128_t vout0123456701234567 = wasm_u8x16_narrow_i16x8(vout01234567, vout01234567);
+
+    vout0123456701234567 = wasm_u8x16_max(vout0123456701234567, voutput_min);
+
+    vout0123456701234567 = wasm_u8x16_min(vout0123456701234567, voutput_max);
+
+    *((double*) output) = wasm_f64x2_extract_lane(vout0123456701234567, 0);
+    output += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    {
+      const v128_t va01234567 = wasm_u16x8_load8x8(input_a);
+      const v128_t vb01234567 = wasm_u16x8_load8x8(input_b);
+
+      v128_t vacc0123 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_u32x4_extend_low_u16x8(va01234567), va_multiplier));
+      v128_t vacc4567 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_u32x4_extend_high_u16x8(va01234567), va_multiplier));
+
+      vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_mul(wasm_u32x4_extend_low_u16x8(vb01234567), vb_multiplier));
+      vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_mul(wasm_u32x4_extend_high_u16x8(vb01234567), vb_multiplier));
+
+      vacc0123 = wasm_i32x4_shr(wasm_i32x4_add(vacc0123, vrounding), vshift);
+      vacc4567 = wasm_i32x4_shr(wasm_i32x4_add(vacc4567, vrounding), vshift);
+
+      v128_t vout01234567 = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vacc0123, vacc4567), voutput_zero_point);
+
+      v128_t vout0123456701234567 = wasm_u8x16_narrow_i16x8(vout01234567, vout01234567);
+      vout0123456701234567 = wasm_u8x16_max(vout0123456701234567, voutput_min);
+      vout0123456701234567 = wasm_u8x16_min(vout0123456701234567, voutput_max);
+
+      if (n & (4 * sizeof(uint8_t))) {
+        *((uint32_t*) output) = (uint32_t) wasm_i32x4_extract_lane(vout0123456701234567, 0);
+        vout0123456701234567 = wasm_u64x2_shr(vout0123456701234567, 32);
+        output += 4;
+      }
+      if (n & (2 * sizeof(uint8_t))) {
+        *((uint16_t*) output) = (uint16_t) wasm_i16x8_extract_lane(vout0123456701234567, 0);
+        vout0123456701234567 = wasm_u32x4_shr(vout0123456701234567, 16);
+        output += 2;
+      }
+      if (n & (1 * sizeof(uint8_t))) {
+        *output = (uint8_t) wasm_i8x16_extract_lane(vout0123456701234567, 0);
+      }
+    }
+  }
+}
diff --git a/src/qu8-vadd/minmax-neon.c b/src/qu8-vadd/minmax-neon.c
deleted file mode 100644
index 2cdca03..0000000
--- a/src/qu8-vadd/minmax-neon.c
+++ /dev/null
@@ -1,246 +0,0 @@
-// Copyright (c) Facebook, Inc. and its affiliates.
-// All rights reserved.
-//
-// Copyright 2019 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <arm_neon.h>
-
-#include <xnnpack/common.h>
-#include <xnnpack/vadd.h>
-
-
-void xnn_qu8_vadd_minmax_ukernel__neon_x32(
-    size_t n,
-    const uint8_t* a,
-    const uint8_t* b,
-    uint8_t* y,
-    const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
-{
-  const uint8x8_t va_zero_point = vld1_dup_u8(&params->neon.a_zero_point);
-  const uint8x8_t vb_zero_point = vld1_dup_u8(&params->neon.b_zero_point);
-  const int16x8_t vy_zero_point = vld1q_dup_s16(&params->neon.y_zero_point);
-  const int32x4_t va_multiplier = vld1q_dup_s32(&params->neon.a_multiplier);
-  const int32x4_t vb_multiplier = vld1q_dup_s32(&params->neon.b_multiplier);
-  const int32x4_t vright_shift = vld1q_dup_s32(&params->neon.right_shift);
-  const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0)));
-  const uint8x16_t vy_max = vld1q_dup_u8(&params->neon.y_max);
-  const uint8x16_t vy_min = vld1q_dup_u8(&params->neon.y_min);
-#if XNN_ARCH_ARM64
-  for (; n >= 32 * sizeof(uint8_t); n -= 32 * sizeof(uint8_t)) {
-    const uint8x16_t va01 = vld1q_u8(a); a += 16;
-    const uint8x16_t vb01 = vld1q_u8(b); b += 16;
-    const uint8x16_t va23 = vld1q_u8(a); a += 16;
-    const uint8x16_t vb23 = vld1q_u8(b); b += 16;
-
-    // Subtract zero point.
-    const int16x8_t vxa0 = vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(va01), va_zero_point));
-    const int16x8_t vxb0 = vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(vb01), vb_zero_point));
-    const int16x8_t vxa1 = vreinterpretq_s16_u16(vsubl_u8(vget_high_u8(va01), va_zero_point));
-    const int16x8_t vxb1 = vreinterpretq_s16_u16(vsubl_u8(vget_high_u8(vb01), vb_zero_point));
-    const int16x8_t vxa2 = vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(va23), va_zero_point));
-    const int16x8_t vxb2 = vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(vb23), vb_zero_point));
-    const int16x8_t vxa3 = vreinterpretq_s16_u16(vsubl_u8(vget_high_u8(va23), va_zero_point));
-    const int16x8_t vxb3 = vreinterpretq_s16_u16(vsubl_u8(vget_high_u8(vb23), vb_zero_point));
-
-    // Multiply by factors and accumulate products.
-    int32x4_t vacc0_lo = vmulq_s32(vmovl_s16(vget_low_s16(vxa0)), va_multiplier);
-    int32x4_t vacc1_lo = vmulq_s32(vmovl_s16(vget_low_s16(vxa1)), va_multiplier);
-    int32x4_t vacc2_lo = vmulq_s32(vmovl_s16(vget_low_s16(vxa2)), va_multiplier);
-    int32x4_t vacc3_lo = vmulq_s32(vmovl_s16(vget_low_s16(vxa3)), va_multiplier);
-    int32x4_t vacc0_hi = vmulq_s32(vmovl_high_s16(vxa0), va_multiplier);
-    int32x4_t vacc1_hi = vmulq_s32(vmovl_high_s16(vxa1), va_multiplier);
-    int32x4_t vacc2_hi = vmulq_s32(vmovl_high_s16(vxa2), va_multiplier);
-    int32x4_t vacc3_hi = vmulq_s32(vmovl_high_s16(vxa3), va_multiplier);
-
-    vacc0_lo = vmlaq_s32(vacc0_lo, vmovl_s16(vget_low_s16(vxb0)), vb_multiplier);
-    vacc1_lo = vmlaq_s32(vacc1_lo, vmovl_s16(vget_low_s16(vxb1)), vb_multiplier);
-    vacc2_lo = vmlaq_s32(vacc2_lo, vmovl_s16(vget_low_s16(vxb2)), vb_multiplier);
-    vacc3_lo = vmlaq_s32(vacc3_lo, vmovl_s16(vget_low_s16(vxb3)), vb_multiplier);
-    vacc0_hi = vmlaq_s32(vacc0_hi, vmovl_high_s16(vxb0), vb_multiplier);
-    vacc1_hi = vmlaq_s32(vacc1_hi, vmovl_high_s16(vxb1), vb_multiplier);
-    vacc2_hi = vmlaq_s32(vacc2_hi, vmovl_high_s16(vxb2), vb_multiplier);
-    vacc3_hi = vmlaq_s32(vacc3_hi, vmovl_high_s16(vxb3), vb_multiplier);
-
-    // Shift right and round.
-    vacc0_lo = vsraq_n_s32(vacc0_lo, vbicq_s32(vacc0_lo, vzero_shift_mask), 31);
-    vacc1_lo = vsraq_n_s32(vacc1_lo, vbicq_s32(vacc1_lo, vzero_shift_mask), 31);
-    vacc2_lo = vsraq_n_s32(vacc2_lo, vbicq_s32(vacc2_lo, vzero_shift_mask), 31);
-    vacc3_lo = vsraq_n_s32(vacc3_lo, vbicq_s32(vacc3_lo, vzero_shift_mask), 31);
-    vacc0_hi = vsraq_n_s32(vacc0_hi, vbicq_s32(vacc0_hi, vzero_shift_mask), 31);
-    vacc1_hi = vsraq_n_s32(vacc1_hi, vbicq_s32(vacc1_hi, vzero_shift_mask), 31);
-    vacc2_hi = vsraq_n_s32(vacc2_hi, vbicq_s32(vacc2_hi, vzero_shift_mask), 31);
-    vacc3_hi = vsraq_n_s32(vacc3_hi, vbicq_s32(vacc3_hi, vzero_shift_mask), 31);
-
-    vacc0_lo = vrshlq_s32(vacc0_lo, vright_shift);
-    vacc1_lo = vrshlq_s32(vacc1_lo, vright_shift);
-    vacc2_lo = vrshlq_s32(vacc2_lo, vright_shift);
-    vacc3_lo = vrshlq_s32(vacc3_lo, vright_shift);
-    vacc0_hi = vrshlq_s32(vacc0_hi, vright_shift);
-    vacc1_hi = vrshlq_s32(vacc1_hi, vright_shift);
-    vacc2_hi = vrshlq_s32(vacc2_hi, vright_shift);
-    vacc3_hi = vrshlq_s32(vacc3_hi, vright_shift);
-
-    // Pack, saturate, and add output zero point.
-    const int16x8_t vacc0 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0_lo), vacc0_hi), vy_zero_point);
-    const int16x8_t vacc1 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1_lo), vacc1_hi), vy_zero_point);
-    const int16x8_t vacc2 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2_lo), vacc2_hi), vy_zero_point);
-    const int16x8_t vacc3 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3_lo), vacc3_hi), vy_zero_point);
-
-    uint8x16_t vy01 = vqmovun_high_s16(vqmovun_s16(vacc0), vacc1);
-    uint8x16_t vy23 = vqmovun_high_s16(vqmovun_s16(vacc2), vacc3);
-
-    vy01 = vmaxq_u8(vy01, vy_min);
-    vy23 = vmaxq_u8(vy23, vy_min);
-    vy01 = vminq_u8(vy01, vy_max);
-    vy23 = vminq_u8(vy23, vy_max);
-
-    vst1q_u8(y, vy01); y += 16;
-    vst1q_u8(y, vy23); y += 16;
-  }
-#else
-  for (; n >= 16 * sizeof(uint8_t); n -= 16 * sizeof(uint8_t)) {
-    const uint8x16_t va01 = vld1q_u8(a); a += 16;
-    const uint8x16_t vb01 = vld1q_u8(b); b += 16;
-
-    // Subtract zero point.
-    const int16x8_t vxa0 = vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(va01), va_zero_point));
-    const int16x8_t vxb0 = vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(vb01), vb_zero_point));
-    const int16x8_t vxa1 = vreinterpretq_s16_u16(vsubl_u8(vget_high_u8(va01), va_zero_point));
-    const int16x8_t vxb1 = vreinterpretq_s16_u16(vsubl_u8(vget_high_u8(vb01), vb_zero_point));
-
-    // Multiply by factors and accumulate products.
-    int32x4_t vacc0_lo = vmulq_s32(vmovl_s16(vget_low_s16(vxa0)), va_multiplier);
-    int32x4_t vacc1_lo = vmulq_s32(vmovl_s16(vget_low_s16(vxa1)), va_multiplier);
-    int32x4_t vacc0_hi = vmulq_s32(vmovl_s16(vget_high_s16(vxa0)), va_multiplier);
-    int32x4_t vacc1_hi = vmulq_s32(vmovl_s16(vget_high_s16(vxa1)), va_multiplier);
-
-    __builtin_prefetch(a + 640);
-    __builtin_prefetch(b + 640);
-
-    vacc0_lo = vmlaq_s32(vacc0_lo, vmovl_s16(vget_low_s16(vxb0)), vb_multiplier);
-    vacc1_lo = vmlaq_s32(vacc1_lo, vmovl_s16(vget_low_s16(vxb1)), vb_multiplier);
-    vacc0_hi = vmlaq_s32(vacc0_hi, vmovl_s16(vget_high_s16(vxb0)), vb_multiplier);
-    vacc1_hi = vmlaq_s32(vacc1_hi, vmovl_s16(vget_high_s16(vxb1)), vb_multiplier);
-
-    // Shift right and round.
-    vacc0_lo = vsraq_n_s32(vacc0_lo, vbicq_s32(vacc0_lo, vzero_shift_mask), 31);
-    vacc1_lo = vsraq_n_s32(vacc1_lo, vbicq_s32(vacc1_lo, vzero_shift_mask), 31);
-    vacc0_hi = vsraq_n_s32(vacc0_hi, vbicq_s32(vacc0_hi, vzero_shift_mask), 31);
-    vacc1_hi = vsraq_n_s32(vacc1_hi, vbicq_s32(vacc1_hi, vzero_shift_mask), 31);
-
-    vacc0_lo = vrshlq_s32(vacc0_lo, vright_shift);
-    vacc1_lo = vrshlq_s32(vacc1_lo, vright_shift);
-    vacc0_hi = vrshlq_s32(vacc0_hi, vright_shift);
-    vacc1_hi = vrshlq_s32(vacc1_hi, vright_shift);
-
-    // Pack, saturate, and add output zero point.
-    const int16x8_t vacc0 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0_lo), vqmovn_s32(vacc0_hi)), vy_zero_point);
-    const int16x8_t vacc1 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1_lo), vqmovn_s32(vacc1_hi)), vy_zero_point);
-
-    uint8x16_t vy01 = vcombine_u8(vqmovun_s16(vacc0), vqmovun_s16(vacc1));
-    vy01 = vmaxq_u8(vy01, vy_min);
-    vy01 = vminq_u8(vy01, vy_max);
-
-    vst1q_u8(y, vy01); y += 16;
-  }
-#endif
-  for (; n >= 8 * sizeof(uint8_t); n -= 8 * sizeof(uint8_t)) {
-    const uint8x8_t va = vld1_u8(a); a += 8;
-    const uint8x8_t vb = vld1_u8(b); b += 8;
-
-    // Subtract zero point.
-    const int16x8_t vxa = vreinterpretq_s16_u16(vsubl_u8(va, va_zero_point));
-    const int16x8_t vxb = vreinterpretq_s16_u16(vsubl_u8(vb, vb_zero_point));
-
-    // Multiply by factors and accumulate products.
-    int32x4_t vacc_lo = vmulq_s32(vmovl_s16(vget_low_s16(vxa)), va_multiplier);
-#if XNN_ARCH_ARM64
-    int32x4_t vacc_hi = vmulq_s32(vmovl_high_s16(vxa), va_multiplier);
-#else
-    int32x4_t vacc_hi = vmulq_s32(vmovl_s16(vget_high_s16(vxa)), va_multiplier);
-#endif
-
-    vacc_lo = vmlaq_s32(vacc_lo, vmovl_s16(vget_low_s16(vxb)), vb_multiplier);
-#if XNN_ARCH_ARM64
-    vacc_hi = vmlaq_s32(vacc_hi, vmovl_high_s16(vxb), vb_multiplier);
-#else
-    vacc_hi = vmlaq_s32(vacc_hi, vmovl_s16(vget_high_s16(vxb)), vb_multiplier);
-#endif
-
-    // Shift right and round.
-    vacc_lo = vsraq_n_s32(vacc_lo, vbicq_s32(vacc_lo, vzero_shift_mask), 31);
-    vacc_hi = vsraq_n_s32(vacc_hi, vbicq_s32(vacc_hi, vzero_shift_mask), 31);
-
-    vacc_lo = vrshlq_s32(vacc_lo, vright_shift);
-    vacc_hi = vrshlq_s32(vacc_hi, vright_shift);
-
-    // Pack, saturate, and add output zero point.
-#if XNN_ARCH_ARM64
-    const int16x8_t vacc = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc_lo), vacc_hi), vy_zero_point);
-#else
-    const int16x8_t vacc = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi)), vy_zero_point);
-#endif
-
-    uint8x8_t vy = vqmovun_s16(vacc);
-    vy = vmax_u8(vy, vget_low_u8(vy_min));
-    vy = vmin_u8(vy, vget_low_u8(vy_max));
-
-    vst1_u8(y, vy); y += 8;
-  }
-  if (n != 0) {
-    const uint8x8_t va = vld1_u8(a);
-    const uint8x8_t vb = vld1_u8(b);
-
-    // Subtract zero point.
-    const int16x8_t vxa = vreinterpretq_s16_u16(vsubl_u8(va, va_zero_point));
-    const int16x8_t vxb = vreinterpretq_s16_u16(vsubl_u8(vb, vb_zero_point));
-
-    // Multiply by factors and accumulate products.
-    int32x4_t vacc_lo = vmulq_s32(vmovl_s16(vget_low_s16(vxa)), va_multiplier);
-#if XNN_ARCH_ARM64
-    int32x4_t vacc_hi = vmulq_s32(vmovl_high_s16(vxa), va_multiplier);
-#else
-    int32x4_t vacc_hi = vmulq_s32(vmovl_s16(vget_high_s16(vxa)), va_multiplier);
-#endif
-
-    vacc_lo = vmlaq_s32(vacc_lo, vmovl_s16(vget_low_s16(vxb)), vb_multiplier);
-#if XNN_ARCH_ARM64
-    vacc_hi = vmlaq_s32(vacc_hi, vmovl_high_s16(vxb), vb_multiplier);
-#else
-    vacc_hi = vmlaq_s32(vacc_hi, vmovl_s16(vget_high_s16(vxb)), vb_multiplier);
-#endif
-
-    // Shift right and round.
-    vacc_lo = vsraq_n_s32(vacc_lo, vbicq_s32(vacc_lo, vzero_shift_mask), 31);
-    vacc_hi = vsraq_n_s32(vacc_hi, vbicq_s32(vacc_hi, vzero_shift_mask), 31);
-
-    vacc_lo = vrshlq_s32(vacc_lo, vright_shift);
-    vacc_hi = vrshlq_s32(vacc_hi, vright_shift);
-
-    // Pack, saturate, and add output zero point.
-#if XNN_ARCH_ARM64
-    const int16x8_t vacc = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc_lo), vacc_hi), vy_zero_point);
-#else
-    const int16x8_t vacc = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi)), vy_zero_point);
-#endif
-
-    uint8x8_t vy = vqmovun_s16(vacc);
-    vy = vmax_u8(vy, vget_low_u8(vy_min));
-    vy = vmin_u8(vy, vget_low_u8(vy_max));
-
-    if (n & (4 * sizeof(uint8_t))) {
-      vst1_lane_u32(__builtin_assume_aligned(y, 1), vreinterpret_u32_u8(vy), 0); y += 4;
-      vy = vext_u8(vy, vy, 4);
-    }
-    if (n & (2 * sizeof(uint8_t))) {
-      vst1_lane_u16(__builtin_assume_aligned(y, 1), vreinterpret_u16_u8(vy), 0); y += 2;
-      vy = vext_u8(vy, vy, 2);
-    }
-    if (n & (1 * sizeof(uint8_t))) {
-      vst1_lane_u8(y, vy, 0);
-    }
-  }
-}
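
Note: the deleted kernel above pairs vrshlq_s32 with a vsraq_n_s32/vbicq_s32 fixup that decrements negative accumulators before the rounding shift (the vbicq mask suppresses the fixup when the shift is zero), while the generated replacements use vrshlq_s32 alone. As I read the intrinsics, the fixup turns ties on negative values from round-half-up into round away from zero. A scalar model of both behaviors, assuming 1 <= shift <= 31:

    #include <stdint.h>

    // vrshlq_s32 with a negated shift: add half of the divisor, then
    // arithmetic-shift right (ties round toward +infinity).
    static inline int32_t rshr_half_up(int32_t x, uint32_t shift) {
      return (int32_t) (((int64_t) x + (INT64_C(1) << (shift - 1))) >> shift);
    }

    // With the vsraq_n_s32(acc, vbicq_s32(acc, zero_shift_mask), 31) fixup:
    // negative inputs are decremented first, so ties round away from zero.
    static inline int32_t rshr_away_from_zero(int32_t x, uint32_t shift) {
      return rshr_half_up(x - (int32_t) (x < 0), shift);
    }
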
diff --git a/src/qu8-vadd/minmax-scalar.c b/src/qu8-vadd/minmax-scalar.c
deleted file mode 100644
index b38c5e2..0000000
--- a/src/qu8-vadd/minmax-scalar.c
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2019 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <xnnpack/common.h>
-#include <xnnpack/vadd.h>
-#include <xnnpack/math.h>
-
-
-void xnn_qu8_vadd_minmax_ukernel__scalar_x1(
-    size_t n,
-    const uint8_t* a,
-    const uint8_t* b,
-    uint8_t* y,
-    const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(n != 0);
-
-  const int32_t vzero_point_product = params->scalar.zero_point_product;
-  const uint32_t va_multiplier = params->scalar.a_multiplier;
-  const uint32_t vb_multiplier = params->scalar.b_multiplier;
-  const uint32_t vshift = params->scalar.shift;
-  const int32_t vremainder_mask = params->scalar.remainder_mask;
-  const int32_t vremainder_threshold = params->scalar.remainder_threshold;
-  const int32_t vy_zero_point = params->scalar.y_zero_point;
-  const int32_t vy_max = params->scalar.y_max;
-  const int32_t vy_min = params->scalar.y_min;
-
-  do {
-    const int32_t va = (int32_t) (uint32_t) *a++;
-    const int32_t vb = (int32_t) (uint32_t) *b++;
-
-    // Multiply by factors.
-    const int32_t va_product = va * va_multiplier;
-    const int32_t vb_product = vb * vb_multiplier;
-
-    // Accumulate products.
-    const int32_t vacc = vzero_point_product + va_product + vb_product;
-
-    // Shift right and round.
-    const int32_t vremainder = (vacc & vremainder_mask) - (int32_t) (vacc < 0);
-    int32_t vy = asr_s32(vacc, vshift) + (int32_t) (vremainder > vremainder_threshold);
-
-    // Pack, saturate, and add output zero point.
-    vy += vy_zero_point;
-    vy = vy < vy_min ? vy_min : vy;
-    vy = vy > vy_max ? vy_max : vy;
-
-    *y++ = vy;
-
-    n -= sizeof(uint8_t);
-  } while (n != 0);
-}
diff --git a/src/qu8-vadd/minmax-sse2.c b/src/qu8-vadd/minmax-sse2.c
deleted file mode 100644
index 458fa7b..0000000
--- a/src/qu8-vadd/minmax-sse2.c
+++ /dev/null
@@ -1,129 +0,0 @@
-// Copyright (c) Facebook, Inc. and its affiliates.
-// All rights reserved.
-//
-// Copyright 2019 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <immintrin.h>
-
-#include <xnnpack/common.h>
-#include <xnnpack/vadd.h>
-
-
-void xnn_qu8_vadd_minmax_ukernel__sse2_x8(
-    size_t n,
-    const uint8_t* a,
-    const uint8_t* b,
-    uint8_t* y,
-    const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
-{
-  const __m128i vzero_point_product = _mm_load_si128((const __m128i*) &params->sse2.zero_point_product);
-  const __m128i va_multiplier_lo = _mm_load_si128((const __m128i*) &params->sse2.a_multiplier_lo);
-  const __m128i va_multiplier_hi = _mm_load_si128((const __m128i*) &params->sse2.a_multiplier_hi);
-  const __m128i vb_multiplier_lo = _mm_load_si128((const __m128i*) &params->sse2.b_multiplier_lo);
-  const __m128i vb_multiplier_hi = _mm_load_si128((const __m128i*) &params->sse2.b_multiplier_hi);
-  const __m128i vremainder_mask = _mm_load_si128((const __m128i*) params->sse2.remainder_mask);
-  const __m128i vremainder_threshold = _mm_load_si128((const __m128i*) params->sse2.remainder_threshold);
-  const __m128i vshift = _mm_cvtsi32_si128((int) params->sse2.shift);
-
-  const __m128i vzero = _mm_setzero_si128();
-  for (; n >= 8 * sizeof(uint8_t); n -= 8 * sizeof(uint8_t)) {
-    const __m128i va = _mm_loadl_epi64((const __m128i*) a);
-    a += 8;
-    const __m128i vb = _mm_loadl_epi64((const __m128i*) b);
-    b += 8;
-
-    const __m128i vxa = _mm_unpacklo_epi8(va, vzero);
-    const __m128i vxb = _mm_unpacklo_epi8(vb, vzero);
-
-    // Multiply by factors.
-    const __m128i va_product_lo = _mm_mullo_epi16(vxa, va_multiplier_lo);
-    const __m128i va_product_hi =
-      _mm_add_epi16(_mm_mulhi_epu16(vxa, va_multiplier_lo), _mm_mullo_epi16(vxa, va_multiplier_hi));
-
-    const __m128i vb_product_lo = _mm_mullo_epi16(vxb, vb_multiplier_lo);
-    const __m128i vb_product_hi =
-      _mm_add_epi16(_mm_mulhi_epu16(vxb, vb_multiplier_lo), _mm_mullo_epi16(vxb, vb_multiplier_hi));
-
-    // Accumulate products.
-    __m128i vacc_lo = _mm_add_epi32(vzero_point_product, _mm_unpacklo_epi16(va_product_lo, va_product_hi));
-    __m128i vacc_hi = _mm_add_epi32(vzero_point_product, _mm_unpackhi_epi16(va_product_lo, va_product_hi));
-
-    vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vb_product_lo, vb_product_hi));
-    vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vb_product_lo, vb_product_hi));
-
-    // Shift right and round.
-    const __m128i vrem_lo =
-      _mm_add_epi32(_mm_and_si128(vacc_lo, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vacc_lo));
-    const __m128i vrem_hi =
-      _mm_add_epi32(_mm_and_si128(vacc_hi, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vacc_hi));
-
-    vacc_lo = _mm_sub_epi32(_mm_sra_epi32(vacc_lo, vshift), _mm_cmpgt_epi32(vrem_lo, vremainder_threshold));
-    vacc_hi = _mm_sub_epi32(_mm_sra_epi32(vacc_hi, vshift), _mm_cmpgt_epi32(vrem_hi, vremainder_threshold));
-
-    // Pack, saturate, and add output zero point.
-    const __m128i vy_zero_point = _mm_load_si128((const __m128i*) params->sse2.y_zero_point);
-    const __m128i vacc = _mm_adds_epi16(_mm_packs_epi32(vacc_lo, vacc_hi), vy_zero_point);
-    __m128i vy = _mm_packus_epi16(vacc, vacc);
-    vy = _mm_max_epu8(vy, _mm_load_si128((const __m128i*) params->sse2.y_min));
-    vy = _mm_min_epu8(vy, _mm_load_si128((const __m128i*) params->sse2.y_max));
-
-    _mm_storel_epi64((__m128i*) y, vy);
-    y += 8;
-  }
-  if (n != 0) {
-    const __m128i va = _mm_loadl_epi64((const __m128i*) a);
-    const __m128i vb = _mm_loadl_epi64((const __m128i*) b);
-
-    const __m128i vxa = _mm_unpacklo_epi8(va, vzero);
-    const __m128i vxb = _mm_unpacklo_epi8(vb, vzero);
-
-    // Multiply by factors.
-    const __m128i va_product_lo = _mm_mullo_epi16(vxa, va_multiplier_lo);
-    const __m128i va_product_hi =
-      _mm_add_epi16(_mm_mulhi_epu16(vxa, va_multiplier_lo), _mm_mullo_epi16(vxa, va_multiplier_hi));
-
-    const __m128i vb_product_lo = _mm_mullo_epi16(vxb, vb_multiplier_lo);
-    const __m128i vb_product_hi =
-      _mm_add_epi16(_mm_mulhi_epu16(vxb, vb_multiplier_lo), _mm_mullo_epi16(vxb, vb_multiplier_hi));
-
-    // Accumulate products.
-    __m128i vacc_lo = _mm_add_epi32(vzero_point_product, _mm_unpacklo_epi16(va_product_lo, va_product_hi));
-    __m128i vacc_hi = _mm_add_epi32(vzero_point_product, _mm_unpackhi_epi16(va_product_lo, va_product_hi));
-
-    vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vb_product_lo, vb_product_hi));
-    vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vb_product_lo, vb_product_hi));
-
-    // Shift right and round.
-    const __m128i vrem_lo =
-      _mm_add_epi32(_mm_and_si128(vacc_lo, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vacc_lo));
-    const __m128i vrem_hi =
-      _mm_add_epi32(_mm_and_si128(vacc_hi, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vacc_hi));
-
-    vacc_lo = _mm_sub_epi32(_mm_sra_epi32(vacc_lo, vshift), _mm_cmpgt_epi32(vrem_lo, vremainder_threshold));
-    vacc_hi = _mm_sub_epi32(_mm_sra_epi32(vacc_hi, vshift), _mm_cmpgt_epi32(vrem_hi, vremainder_threshold));
-
-    // Pack, saturate, and add output zero point.
-    const __m128i vy_zero_point = _mm_load_si128((const __m128i*) params->sse2.y_zero_point);
-    const __m128i vacc = _mm_adds_epi16(_mm_packs_epi32(vacc_lo, vacc_hi), vy_zero_point);
-    __m128i vy = _mm_packus_epi16(vacc, vacc);
-    vy = _mm_max_epu8(vy, _mm_load_si128((const __m128i*) params->sse2.y_min));
-    vy = _mm_min_epu8(vy, _mm_load_si128((const __m128i*) params->sse2.y_max));
-
-    if (n & (4 * sizeof(uint8_t))) {
-      *((uint32_t*) y) = (uint32_t) _mm_cvtsi128_si32(vy);
-      vy = _mm_srli_epi64(vy, 32);
-      y += 4;
-    }
-    if (n & (2 * sizeof(uint8_t))) {
-      *((uint16_t*) y) = (uint16_t) _mm_extract_epi16(vy, 0);
-      vy = _mm_srli_epi32(vy, 16);
-      y += 2;
-    }
-    if (n & (1 * sizeof(uint8_t))) {
-      *((uint8_t*) y) = (uint8_t) _mm_cvtsi128_si32(vy);
-    }
-  }
-}
diff --git a/src/qu8-vaddc/gen/minmax-neon-ld64-x16.c b/src/qu8-vaddc/gen/minmax-neon-ld64-x16.c
new file mode 100644
index 0000000..dec0c9e
--- /dev/null
+++ b/src/qu8-vaddc/gen/minmax-neon-ld64-x16.c
@@ -0,0 +1,102 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-vaddc/neon-ld64.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/vadd.h>
+
+#include <inttypes.h>
+
+
+void xnn_qu8_vaddc_minmax_ukernel__neon_ld64_x16(
+    size_t n,
+    const uint8_t* input_a,
+    const uint8_t* input_b,
+    uint8_t* output,
+    const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  const uint8x8_t va_zero_point = vld1_dup_u8(&params->neon.a_zero_point);
+  const int32x4_t va_multiplier = vld1q_dup_s32(&params->neon.a_multiplier);
+  const int32x4_t vright_shift = vld1q_dup_s32(&params->neon.right_shift);
+  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->neon.output_zero_point);
+  const uint8x16_t voutput_min = vld1q_dup_u8(&params->neon.output_min);
+  const uint8x16_t voutput_max = vld1q_dup_u8(&params->neon.output_max);
+
+  const int32_t vxb = (int32_t) *input_b - (int32_t) params->neon.b_zero_point;
+  const int32_t vb = params->neon.b_multiplier;
+  const int32x4_t vbias = vdupq_n_s32(vxb * vb);
+
+  for (; n >= 16 * sizeof(uint8_t); n -= 16 * sizeof(uint8_t)) {
+    const uint8x8_t va01234567 = vld1_u8(input_a); input_a += 8;
+    const uint8x8_t va89ABCDEF = vld1_u8(input_a); input_a += 8;
+
+    const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, va_zero_point));
+    const int16x8_t vxa89ABCDEF = vreinterpretq_s16_u16(vsubl_u8(va89ABCDEF, va_zero_point));
+
+    int32x4_t vacc0123 = vmlaq_s32(vbias, vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
+    int32x4_t vacc4567 = vmlaq_s32(vbias, vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
+    int32x4_t vacc89AB = vmlaq_s32(vbias, vmovl_s16(vget_low_s16(vxa89ABCDEF)), va_multiplier);
+    int32x4_t vaccCDEF = vmlaq_s32(vbias, vmovl_s16(vget_high_s16(vxa89ABCDEF)), va_multiplier);
+
+    vacc0123 = vrshlq_s32(vacc0123, vright_shift);
+    vacc4567 = vrshlq_s32(vacc4567, vright_shift);
+    vacc89AB = vrshlq_s32(vacc89AB, vright_shift);
+    vaccCDEF = vrshlq_s32(vaccCDEF, vright_shift);
+
+    const int16x8_t vacc01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567)), voutput_zero_point);
+    const int16x8_t vacc89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF)), voutput_zero_point);
+
+    uint8x16_t vout0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc01234567), vqmovun_s16(vacc89ABCDEF));
+
+    vout0123456789ABCDEF = vmaxq_u8(vout0123456789ABCDEF, voutput_min);
+
+    vout0123456789ABCDEF = vminq_u8(vout0123456789ABCDEF, voutput_max);
+
+    vst1q_u8(output, vout0123456789ABCDEF); output += 16;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    do {
+      const uint8x8_t va01234567 = vld1_u8(input_a); input_a += 8;
+
+      const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, va_zero_point));
+
+      int32x4_t vacc0123 = vmlaq_s32(vbias, vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
+      int32x4_t vacc4567 = vmlaq_s32(vbias, vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
+
+      vacc0123 = vrshlq_s32(vacc0123, vright_shift);
+      vacc4567 = vrshlq_s32(vacc4567, vright_shift);
+
+      const int16x8_t vacc01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567)), voutput_zero_point);
+
+      uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
+      vout01234567 = vmax_u8(vout01234567, vget_low_u8(voutput_min));
+      vout01234567 = vmin_u8(vout01234567, vget_low_u8(voutput_max));
+
+      if XNN_LIKELY(n >= (8 * sizeof(uint8_t))) {
+        vst1_u8(output, vout01234567); output += 8;
+        n -= 8 * sizeof(uint8_t);
+      } else {
+        if (n & (4 * sizeof(uint8_t))) {
+          vst1_lane_u32(__builtin_assume_aligned(output, 1), vreinterpret_u32_u8(vout01234567), 0); output += 4;
+          vout01234567 = vext_u8(vout01234567, vout01234567, 4);
+        }
+        if (n & (2 * sizeof(uint8_t))) {
+          vst1_lane_u16(__builtin_assume_aligned(output, 1), vreinterpret_u16_u8(vout01234567), 0); output += 2;
+          vout01234567 = vext_u8(vout01234567, vout01234567, 2);
+        }
+        if (n & (1 * sizeof(uint8_t))) {
+          vst1_lane_u8(output, vout01234567, 0);
+        }
+        n = 0;
+      }
+    } while (n != 0);
+  }
+}
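
In the VADDC kernels the second operand is a single broadcast element, so the term (b - b_zero_point) * b_multiplier is computed once per call and folded into vbias; the loop body then needs only one widening multiply-accumulate per lane plus a rounding shift. vrshlq_s32 does that shift in one instruction: right_shift holds a negative count, and shifting left by a negative amount rounds to nearest by adding 2^(shift-1) before the arithmetic shift. A scalar model of one lane (an illustrative sketch, assuming shift >= 1 and arithmetic right shift of signed values, which XNNPACK's asr_s32 helper provides portably):

    #include <stdint.h>

    // One lane of the vaddc loop above, modeled in scalar code.
    // vbias corresponds to (b - b_zero_point) * b_multiplier, hoisted
    // out of the loop; shift is the positive magnitude of right_shift.
    static int32_t model_vaddc_lane(
        uint8_t a, uint8_t a_zero_point, int32_t a_multiplier,
        int32_t vbias, uint32_t shift)
    {
      const int32_t acc =
          vbias + ((int32_t) a - (int32_t) a_zero_point) * a_multiplier;
      // VRSHL with a negative count: add the rounding constant, then
      // shift right arithmetically (round to nearest).
      return (acc + (INT32_C(1) << (shift - 1))) >> shift;
    }
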
diff --git a/src/qu8-vaddc/gen/minmax-neon-ld64-x8.c b/src/qu8-vaddc/gen/minmax-neon-ld64-x8.c
new file mode 100644
index 0000000..252ce2c
--- /dev/null
+++ b/src/qu8-vaddc/gen/minmax-neon-ld64-x8.c
@@ -0,0 +1,89 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-vaddc/neon-ld64.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/vadd.h>
+
+#include <inttypes.h>
+
+
+void xnn_qu8_vaddc_minmax_ukernel__neon_ld64_x8(
+    size_t n,
+    const uint8_t* input_a,
+    const uint8_t* input_b,
+    uint8_t* output,
+    const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  const uint8x8_t va_zero_point = vld1_dup_u8(&params->neon.a_zero_point);
+  const int32x4_t va_multiplier = vld1q_dup_s32(&params->neon.a_multiplier);
+  const int32x4_t vright_shift = vld1q_dup_s32(&params->neon.right_shift);
+  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->neon.output_zero_point);
+  const uint8x16_t voutput_min = vld1q_dup_u8(&params->neon.output_min);
+  const uint8x16_t voutput_max = vld1q_dup_u8(&params->neon.output_max);
+
+  const int32_t vxb = (int32_t) *input_b - (int32_t) params->neon.b_zero_point;
+  const int32_t vb = params->neon.b_multiplier;
+  const int32x4_t vbias = vdupq_n_s32(vxb * vb);
+
+  for (; n >= 8 * sizeof(uint8_t); n -= 8 * sizeof(uint8_t)) {
+    const uint8x8_t va01234567 = vld1_u8(input_a); input_a += 8;
+
+    const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, va_zero_point));
+
+    int32x4_t vacc0123 = vmlaq_s32(vbias, vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
+    int32x4_t vacc4567 = vmlaq_s32(vbias, vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
+
+    vacc0123 = vrshlq_s32(vacc0123, vright_shift);
+    vacc4567 = vrshlq_s32(vacc4567, vright_shift);
+
+    const int16x8_t vacc01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567)), voutput_zero_point);
+
+    uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
+
+    vout01234567 = vmax_u8(vout01234567, vget_low_u8(voutput_min));
+
+    vout01234567 = vmin_u8(vout01234567, vget_low_u8(voutput_max));
+
+    vst1_u8(output, vout01234567); output += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    {
+      const uint8x8_t va01234567 = vld1_u8(input_a);
+
+      const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, va_zero_point));
+
+      int32x4_t vacc0123 = vmlaq_s32(vbias, vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
+      int32x4_t vacc4567 = vmlaq_s32(vbias, vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
+
+      vacc0123 = vrshlq_s32(vacc0123, vright_shift);
+      vacc4567 = vrshlq_s32(vacc4567, vright_shift);
+
+      const int16x8_t vacc01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567)), voutput_zero_point);
+
+      uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
+      vout01234567 = vmax_u8(vout01234567, vget_low_u8(voutput_min));
+      vout01234567 = vmin_u8(vout01234567, vget_low_u8(voutput_max));
+
+      if (n & (4 * sizeof(uint8_t))) {
+        vst1_lane_u32(__builtin_assume_aligned(output, 1), vreinterpret_u32_u8(vout01234567), 0); output += 4;
+        vout01234567 = vext_u8(vout01234567, vout01234567, 4);
+      }
+      if (n & (2 * sizeof(uint8_t))) {
+        vst1_lane_u16(__builtin_assume_aligned(output, 1), vreinterpret_u16_u8(vout01234567), 0); output += 2;
+        vout01234567 = vext_u8(vout01234567, vout01234567, 2);
+      }
+      if (n & (1 * sizeof(uint8_t))) {
+        vst1_lane_u8(output, vout01234567, 0);
+      }
+    }
+  }
+}
diff --git a/src/qu8-vaddc/gen/minmax-scalar-x1.c b/src/qu8-vaddc/gen/minmax-scalar-x1.c
new file mode 100644
index 0000000..d654728
--- /dev/null
+++ b/src/qu8-vaddc/gen/minmax-scalar-x1.c
@@ -0,0 +1,42 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-vaddc/scalar.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xnnpack/math.h>
+#include <xnnpack/vadd.h>
+
+
+void xnn_qu8_vaddc_minmax_ukernel__scalar_x1(
+    size_t n,
+    const uint8_t* input_a,
+    const uint8_t* input_b,
+    uint8_t* output,
+    const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  const int32_t vbias = params->scalar.bias + (int32_t) *input_b * params->scalar.b_multiplier;
+  const int32_t va_multiplier = params->scalar.a_multiplier;
+  const int32_t vrounding = params->scalar.rounding;
+  const uint32_t vshift = params->scalar.shift;
+  const int32_t voutput_min_less_zero_point = params->scalar.output_min_less_zero_point;
+  const int32_t voutput_max_less_zero_point = params->scalar.output_max_less_zero_point;
+  const int32_t voutput_zero_point = params->scalar.output_zero_point;
+
+  do {
+    const int32_t va = *input_a++;
+    const int32_t vacc = vbias + va * va_multiplier;
+
+    int32_t vout = asr_s32(vacc + vrounding, vshift);
+    vout = math_max_s32(vout, voutput_min_less_zero_point);
+    vout = math_min_s32(vout, voutput_max_less_zero_point);
+    *output++ = (uint8_t) (vout + voutput_zero_point);
+
+    n -= sizeof(uint8_t);
+  } while (n != 0);
+}
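
The scalar kernel is the reference point for all the vector variants: everything except the per-element multiply is precomputed into the params struct. A worked example with hypothetical parameter values (not taken from XNNPACK), tracing one element through the same steps:

    #include <assert.h>
    #include <stdint.h>

    // Mirror of the scalar ukernel body above; the values in main are
    // hypothetical and chosen so the arithmetic is easy to follow.
    static uint8_t qu8_vaddc_ref(uint8_t a, int32_t bias, int32_t a_multiplier,
                                 int32_t rounding, uint32_t shift,
                                 int32_t min_less_zp, int32_t max_less_zp,
                                 int32_t zero_point)
    {
      const int32_t acc = bias + (int32_t) a * a_multiplier;
      int32_t vout = (acc + rounding) >> shift;  // asr_s32: arithmetic shift
      vout = vout < min_less_zp ? min_less_zp : vout;
      vout = vout > max_less_zp ? max_less_zp : vout;
      return (uint8_t) (vout + zero_point);
    }

    int main(void) {
      // shift = 20, rounding = 2^19, a_multiplier = 2^20: one unit of input
      // maps to exactly one unit of output; bias = 0 models zero offsets.
      const uint8_t y = qu8_vaddc_ref(3, 0, INT32_C(1) << 20, INT32_C(1) << 19,
                                      20, -128, 127, 128);
      assert(y == 131);  // 3 + output zero point of 128
      return 0;
    }
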
diff --git a/src/qu8-vaddc/gen/minmax-scalar-x2.c b/src/qu8-vaddc/gen/minmax-scalar-x2.c
new file mode 100644
index 0000000..9d8e4e0
--- /dev/null
+++ b/src/qu8-vaddc/gen/minmax-scalar-x2.c
@@ -0,0 +1,65 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-vaddc/scalar.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xnnpack/math.h>
+#include <xnnpack/vadd.h>
+
+
+void xnn_qu8_vaddc_minmax_ukernel__scalar_x2(
+    size_t n,
+    const uint8_t* input_a,
+    const uint8_t* input_b,
+    uint8_t* output,
+    const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  const int32_t vbias = params->scalar.bias + (int32_t) *input_b * params->scalar.b_multiplier;
+  const int32_t va_multiplier = params->scalar.a_multiplier;
+  const int32_t vrounding = params->scalar.rounding;
+  const uint32_t vshift = params->scalar.shift;
+  const int32_t voutput_min_less_zero_point = params->scalar.output_min_less_zero_point;
+  const int32_t voutput_max_less_zero_point = params->scalar.output_max_less_zero_point;
+  const int32_t voutput_zero_point = params->scalar.output_zero_point;
+
+  for (; n >= 2 * sizeof(uint8_t); n -= 2 * sizeof(uint8_t)) {
+    const int32_t va0 = input_a[0];
+    const int32_t va1 = input_a[1];
+    input_a += 2;
+
+    const int32_t vacc0 = vbias + va0 * va_multiplier;
+    const int32_t vacc1 = vbias + va1 * va_multiplier;
+    input_b += 2;
+
+    int32_t vout0 = asr_s32(vacc0 + vrounding, vshift);
+    int32_t vout1 = asr_s32(vacc1 + vrounding, vshift);
+
+    vout0 = math_max_s32(vout0, voutput_min_less_zero_point);
+    vout1 = math_max_s32(vout1, voutput_min_less_zero_point);
+
+    vout0 = math_min_s32(vout0, voutput_max_less_zero_point);
+    vout1 = math_min_s32(vout1, voutput_max_less_zero_point);
+
+    vout0 += voutput_zero_point;
+    vout1 += voutput_zero_point;
+
+    output[0] = (uint8_t) vout0;
+    output[1] = (uint8_t) vout1;
+    output += 2;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    const int32_t va = *input_a;
+    const int32_t vacc = vbias + va * va_multiplier;
+
+    int32_t vout = asr_s32(vacc + vrounding, vshift);
+    vout = math_max_s32(vout, voutput_min_less_zero_point);
+    vout = math_min_s32(vout, voutput_max_less_zero_point);
+    *output++ = (uint8_t) (vout + voutput_zero_point);
+  }
+}
diff --git a/src/qu8-vaddc/gen/minmax-scalar-x4.c b/src/qu8-vaddc/gen/minmax-scalar-x4.c
new file mode 100644
index 0000000..d1a1d07
--- /dev/null
+++ b/src/qu8-vaddc/gen/minmax-scalar-x4.c
@@ -0,0 +1,83 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-vaddc/scalar.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xnnpack/math.h>
+#include <xnnpack/vadd.h>
+
+
+void xnn_qu8_vaddc_minmax_ukernel__scalar_x4(
+    size_t n,
+    const uint8_t* input_a,
+    const uint8_t* input_b,
+    uint8_t* output,
+    const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  const int32_t vbias = params->scalar.bias + (int32_t) *input_b * params->scalar.b_multiplier;
+  const int32_t va_multiplier = params->scalar.a_multiplier;
+  const int32_t vrounding = params->scalar.rounding;
+  const uint32_t vshift = params->scalar.shift;
+  const int32_t voutput_min_less_zero_point = params->scalar.output_min_less_zero_point;
+  const int32_t voutput_max_less_zero_point = params->scalar.output_max_less_zero_point;
+  const int32_t voutput_zero_point = params->scalar.output_zero_point;
+
+  for (; n >= 4 * sizeof(uint8_t); n -= 4 * sizeof(uint8_t)) {
+    const int32_t va0 = input_a[0];
+    const int32_t va1 = input_a[1];
+    const int32_t va2 = input_a[2];
+    const int32_t va3 = input_a[3];
+    input_a += 4;
+
+    const int32_t vacc0 = vbias + va0 * va_multiplier;
+    const int32_t vacc1 = vbias + va1 * va_multiplier;
+    const int32_t vacc2 = vbias + va2 * va_multiplier;
+    const int32_t vacc3 = vbias + va3 * va_multiplier;
+    input_b += 4;
+
+    int32_t vout0 = asr_s32(vacc0 + vrounding, vshift);
+    int32_t vout1 = asr_s32(vacc1 + vrounding, vshift);
+    int32_t vout2 = asr_s32(vacc2 + vrounding, vshift);
+    int32_t vout3 = asr_s32(vacc3 + vrounding, vshift);
+
+    vout0 = math_max_s32(vout0, voutput_min_less_zero_point);
+    vout1 = math_max_s32(vout1, voutput_min_less_zero_point);
+    vout2 = math_max_s32(vout2, voutput_min_less_zero_point);
+    vout3 = math_max_s32(vout3, voutput_min_less_zero_point);
+
+    vout0 = math_min_s32(vout0, voutput_max_less_zero_point);
+    vout1 = math_min_s32(vout1, voutput_max_less_zero_point);
+    vout2 = math_min_s32(vout2, voutput_max_less_zero_point);
+    vout3 = math_min_s32(vout3, voutput_max_less_zero_point);
+
+    vout0 += voutput_zero_point;
+    vout1 += voutput_zero_point;
+    vout2 += voutput_zero_point;
+    vout3 += voutput_zero_point;
+
+    output[0] = (uint8_t) vout0;
+    output[1] = (uint8_t) vout1;
+    output[2] = (uint8_t) vout2;
+    output[3] = (uint8_t) vout3;
+    output += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    do {
+      const int32_t va = *input_a++;
+      const int32_t vacc = vbias + va * va_multiplier;
+
+      int32_t vout = asr_s32(vacc + vrounding, vshift);
+      vout = math_max_s32(vout, voutput_min_less_zero_point);
+      vout = math_min_s32(vout, voutput_max_less_zero_point);
+      *output++ = (uint8_t) (vout + voutput_zero_point);
+
+      n -= sizeof(uint8_t);
+    } while (n != 0);
+  }
+}
diff --git a/src/qu8-vaddc/gen/minmax-sse2-mul16-ld64-x16.c b/src/qu8-vaddc/gen/minmax-sse2-mul16-ld64-x16.c
new file mode 100644
index 0000000..e020360
--- /dev/null
+++ b/src/qu8-vaddc/gen/minmax-sse2-mul16-ld64-x16.c
@@ -0,0 +1,123 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-vaddc/sse-mul16-ld64.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <emmintrin.h>
+
+#include <xnnpack/vadd.h>
+
+
+void xnn_qu8_vaddc_minmax_ukernel__sse2_mul16_ld64_x16(
+    size_t n,
+    const uint8_t* input_a,
+    const uint8_t* input_b,
+    uint8_t* output,
+    const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  const __m128i vbias = _mm_add_epi32(
+    _mm_shuffle_epi32(_mm_cvtsi32_si128(params->sse2.b_multiplier * (int32_t) *input_b), _MM_SHUFFLE(0, 0, 0, 0)),
+    _mm_load_si128((const __m128i*) params->sse2.bias));
+  const __m128i va_multiplier_lo = _mm_load_si128((const __m128i*) params->sse2.a_multiplier_lo);
+  const __m128i va_multiplier_hi = _mm_load_si128((const __m128i*) params->sse2.a_multiplier_hi);
+  const __m128i vrounding = _mm_load_si128((const __m128i*) params->sse2.rounding);
+  const __m128i vshift = _mm_cvtsi32_si128((int) params->sse2.shift);
+  const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
+  const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
+  const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
+
+  for (; n >= 16 * sizeof(uint8_t); n -= 16 * sizeof(uint8_t)) {
+    __m128i va01234567 = _mm_loadl_epi64((const __m128i*) input_a);
+    __m128i va89ABCDEF = _mm_loadl_epi64((const __m128i*) (input_a + 8));
+    input_a += 16;
+
+    const __m128i vzero = _mm_setzero_si128();
+    va01234567 = _mm_unpacklo_epi8(va01234567, vzero);
+    va89ABCDEF = _mm_unpacklo_epi8(va89ABCDEF, vzero);
+
+    __m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
+    const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
+    __m128i vaprod89ABCDEFhi = _mm_mulhi_epu16(va89ABCDEF, va_multiplier_lo);
+    const __m128i vaprod89ABCDEFlo = _mm_mullo_epi16(va89ABCDEF, va_multiplier_lo);
+
+    vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
+    vaprod89ABCDEFhi = _mm_add_epi16(vaprod89ABCDEFhi, _mm_mullo_epi16(va89ABCDEF, va_multiplier_hi));
+
+
+    __m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
+    __m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
+    __m128i vacc89AB = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod89ABCDEFlo, vaprod89ABCDEFhi));
+    __m128i vaccCDEF = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod89ABCDEFlo, vaprod89ABCDEFhi));
+
+    vacc0123 = _mm_sra_epi32(_mm_add_epi32(vacc0123, vrounding), vshift);
+    vacc4567 = _mm_sra_epi32(_mm_add_epi32(vacc4567, vrounding), vshift);
+    vacc89AB = _mm_sra_epi32(_mm_add_epi32(vacc89AB, vrounding), vshift);
+    vaccCDEF = _mm_sra_epi32(_mm_add_epi32(vaccCDEF, vrounding), vshift);
+
+    __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
+    __m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
+
+
+    __m128i vout0123456789ABCDEF = _mm_packus_epi16(vout01234567, vout89ABCDEF);
+
+    vout0123456789ABCDEF = _mm_max_epu8(vout0123456789ABCDEF, voutput_min);
+
+    vout0123456789ABCDEF = _mm_min_epu8(vout0123456789ABCDEF, voutput_max);
+
+    _mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
+    output += 16;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    do {
+      __m128i va01234567 = _mm_loadl_epi64((const __m128i*) input_a);
+      input_a += 8;
+
+      va01234567 = _mm_unpacklo_epi8(va01234567, _mm_setzero_si128());
+
+      __m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
+      const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
+
+      vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
+
+
+      __m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
+      __m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
+
+      vacc0123 = _mm_sra_epi32(_mm_add_epi32(vacc0123, vrounding), vshift);
+      vacc4567 = _mm_sra_epi32(_mm_add_epi32(vacc4567, vrounding), vshift);
+
+      __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
+
+      __m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
+      vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
+      vout0123456701234567 = _mm_min_epu8(vout0123456701234567, voutput_max);
+
+      if XNN_LIKELY(n >= (8 * sizeof(uint8_t))) {
+        _mm_storel_epi64((__m128i*) output, vout0123456701234567);
+        output += 8;
+        n -= 8 * sizeof(uint8_t);
+      } else {
+        if (n & (4 * sizeof(uint8_t))) {
+          *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout0123456701234567);
+          vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
+          output += 4;
+        }
+        if (n & (2 * sizeof(uint8_t))) {
+          *((uint16_t*) output) = (uint16_t) _mm_cvtsi128_si32(vout0123456701234567);
+          vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
+          output += 2;
+        }
+        if (n & (1 * sizeof(uint8_t))) {
+          *output = (uint8_t) _mm_cvtsi128_si32(vout0123456701234567);
+        }
+        n = 0;
+      }
+    } while (n != 0);
+  }
+}
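
SSE2 has no widening 16x32 multiply, so the mul16 variants split the 32-bit a_multiplier into 16-bit halves: _mm_mullo_epi16 and _mm_mulhi_epu16 against the low half give the low and high 16 bits of x * m_lo, _mm_mullo_epi16 against the high half contributes x * m_hi to the upper bits, and the unpacklo/unpackhi interleave reassembles 32-bit products. A scalar check of the decomposition (illustrative; the identity yields the true product whenever x * m fits in 32 bits, which holds here since inputs are 8-bit and the multipliers are below 2^21):

    #include <assert.h>
    #include <stdint.h>

    // x * m rebuilt from 16-bit partial products, as the kernels above
    // do per lane with m = m_lo + (m_hi << 16).
    static uint32_t mul16_decomposed(uint16_t x, uint32_t m) {
      const uint32_t m_lo = m & 0xFFFF;
      const uint32_t m_hi = m >> 16;
      const uint16_t prod_lo = (uint16_t) ((uint32_t) x * m_lo);   // _mm_mullo_epi16
      const uint16_t prod_hi = (uint16_t)
          ((((uint32_t) x * m_lo) >> 16)                           // _mm_mulhi_epu16
           + (uint32_t) x * m_hi);                                 // + _mm_mullo_epi16
      return (uint32_t) prod_lo | ((uint32_t) prod_hi << 16);      // unpacklo/unpackhi
    }

    int main(void) {
      assert(mul16_decomposed(255, UINT32_C(0x001FFFFF)) ==
             255 * UINT32_C(0x001FFFFF));
      return 0;
    }
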
diff --git a/src/qu8-vaddc/gen/minmax-sse2-mul16-ld64-x8.c b/src/qu8-vaddc/gen/minmax-sse2-mul16-ld64-x8.c
new file mode 100644
index 0000000..77346a3
--- /dev/null
+++ b/src/qu8-vaddc/gen/minmax-sse2-mul16-ld64-x8.c
@@ -0,0 +1,105 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-vaddc/sse-mul16-ld64.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <emmintrin.h>
+
+#include <xnnpack/vadd.h>
+
+
+void xnn_qu8_vaddc_minmax_ukernel__sse2_mul16_ld64_x8(
+    size_t n,
+    const uint8_t* input_a,
+    const uint8_t* input_b,
+    uint8_t* output,
+    const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  const __m128i vbias = _mm_add_epi32(
+    _mm_shuffle_epi32(_mm_cvtsi32_si128(params->sse2.b_multiplier * (int32_t) *input_b), _MM_SHUFFLE(0, 0, 0, 0)),
+    _mm_load_si128((const __m128i*) params->sse2.bias));
+  const __m128i va_multiplier_lo = _mm_load_si128((const __m128i*) params->sse2.a_multiplier_lo);
+  const __m128i va_multiplier_hi = _mm_load_si128((const __m128i*) params->sse2.a_multiplier_hi);
+  const __m128i vrounding = _mm_load_si128((const __m128i*) params->sse2.rounding);
+  const __m128i vshift = _mm_cvtsi32_si128((int) params->sse2.shift);
+  const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
+  const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
+  const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
+
+  for (; n >= 8 * sizeof(uint8_t); n -= 8 * sizeof(uint8_t)) {
+    __m128i va01234567 = _mm_loadl_epi64((const __m128i*) input_a);
+    input_a += 8;
+
+    const __m128i vzero = _mm_setzero_si128();
+    va01234567 = _mm_unpacklo_epi8(va01234567, vzero);
+
+    __m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
+    const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
+
+    vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
+
+
+    __m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
+    __m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
+
+    vacc0123 = _mm_sra_epi32(_mm_add_epi32(vacc0123, vrounding), vshift);
+    vacc4567 = _mm_sra_epi32(_mm_add_epi32(vacc4567, vrounding), vshift);
+
+    __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
+
+
+    __m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
+
+    vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
+
+    vout0123456701234567 = _mm_min_epu8(vout0123456701234567, voutput_max);
+
+    _mm_storel_epi64((__m128i*) output, vout0123456701234567);
+    output += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    {
+      __m128i va01234567 = _mm_loadl_epi64((const __m128i*) input_a);
+
+      va01234567 = _mm_unpacklo_epi8(va01234567, _mm_setzero_si128());
+
+      __m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
+      const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
+
+      vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
+
+
+      __m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
+      __m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
+
+      vacc0123 = _mm_sra_epi32(_mm_add_epi32(vacc0123, vrounding), vshift);
+      vacc4567 = _mm_sra_epi32(_mm_add_epi32(vacc4567, vrounding), vshift);
+
+      __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
+
+      __m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
+      vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
+      vout0123456701234567 = _mm_min_epu8(vout0123456701234567, voutput_max);
+
+      if (n & (4 * sizeof(uint8_t))) {
+        *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout0123456701234567);
+        vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
+        output += 4;
+      }
+      if (n & (2 * sizeof(uint8_t))) {
+        *((uint16_t*) output) = (uint16_t) _mm_cvtsi128_si32(vout0123456701234567);
+        vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
+        output += 2;
+      }
+      if (n & (1 * sizeof(uint8_t))) {
+        *output = (uint8_t) _mm_cvtsi128_si32(vout0123456701234567);
+      }
+    }
+  }
+}
diff --git a/src/qu8-vaddc/gen/minmax-wasmsimd-x16.c b/src/qu8-vaddc/gen/minmax-wasmsimd-x16.c
new file mode 100644
index 0000000..9f566b0
--- /dev/null
+++ b/src/qu8-vaddc/gen/minmax-wasmsimd-x16.c
@@ -0,0 +1,100 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-vaddc/wasmsimd.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/vadd.h>
+
+
+void xnn_qu8_vaddc_minmax_ukernel__wasmsimd_x16(
+    size_t n,
+    const uint8_t* input_a,
+    const uint8_t* input_b,
+    uint8_t* output,
+    const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  const v128_t va_multiplier = wasm_v128_load(params->wasmsimd.a_multiplier);
+  const v128_t vrounding = wasm_v128_load(params->wasmsimd.rounding);
+  const int32_t vshift = params->wasmsimd.shift;
+  const v128_t voutput_zero_point = wasm_v128_load(params->wasmsimd.output_zero_point);
+  const v128_t voutput_min = wasm_v128_load(params->wasmsimd.output_min);
+  const v128_t voutput_max = wasm_v128_load(params->wasmsimd.output_max);
+
+  v128_t vbias = wasm_i32x4_splat((int32_t) *input_b * params->wasmsimd.b_multiplier[0]);
+  vbias = wasm_i32x4_add(vbias, wasm_v128_load(params->wasmsimd.bias));
+
+  for (; n >= 16 * sizeof(uint8_t); n -= 16 * sizeof(uint8_t)) {
+    const v128_t va01234567 = wasm_u16x8_load8x8(input_a);
+    const v128_t va89ABCDEF = wasm_u16x8_load8x8(input_a + 8);
+    input_a += 16;
+
+    v128_t vacc0123 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_u32x4_extend_low_u16x8(va01234567), va_multiplier));
+    v128_t vacc4567 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_u32x4_extend_high_u16x8(va01234567), va_multiplier));
+    v128_t vacc89AB = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_u32x4_extend_low_u16x8(va89ABCDEF), va_multiplier));
+    v128_t vaccCDEF = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_u32x4_extend_high_u16x8(va89ABCDEF), va_multiplier));
+
+    vacc0123 = wasm_i32x4_shr(wasm_i32x4_add(vacc0123, vrounding), vshift);
+    vacc4567 = wasm_i32x4_shr(wasm_i32x4_add(vacc4567, vrounding), vshift);
+    vacc89AB = wasm_i32x4_shr(wasm_i32x4_add(vacc89AB, vrounding), vshift);
+    vaccCDEF = wasm_i32x4_shr(wasm_i32x4_add(vaccCDEF, vrounding), vshift);
+
+    v128_t vout01234567 = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vacc0123, vacc4567), voutput_zero_point);
+    v128_t vout89ABCDEF = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vacc89AB, vaccCDEF), voutput_zero_point);
+
+    v128_t vout0123456789ABCDEF = wasm_u8x16_narrow_i16x8(vout01234567, vout89ABCDEF);
+
+    vout0123456789ABCDEF = wasm_u8x16_max(vout0123456789ABCDEF, voutput_min);
+
+    vout0123456789ABCDEF = wasm_u8x16_min(vout0123456789ABCDEF, voutput_max);
+
+    wasm_v128_store(output, vout0123456789ABCDEF);
+    output += 16;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    do {
+      const v128_t va01234567 = wasm_u16x8_load8x8(input_a);
+      input_a += 8;
+
+      v128_t vacc0123 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_u32x4_extend_low_u16x8(va01234567), va_multiplier));
+      v128_t vacc4567 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_u32x4_extend_high_u16x8(va01234567), va_multiplier));
+
+      vacc0123 = wasm_i32x4_shr(wasm_i32x4_add(vacc0123, vrounding), vshift);
+      vacc4567 = wasm_i32x4_shr(wasm_i32x4_add(vacc4567, vrounding), vshift);
+
+      v128_t vout01234567 = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vacc0123, vacc4567), voutput_zero_point);
+
+      v128_t vout0123456701234567 = wasm_u8x16_narrow_i16x8(vout01234567, vout01234567);
+      vout0123456701234567 = wasm_u8x16_max(vout0123456701234567, voutput_min);
+      vout0123456701234567 = wasm_u8x16_min(vout0123456701234567, voutput_max);
+
+      if XNN_LIKELY(n >= (8 * sizeof(uint8_t))) {
+        *((double*) output) = wasm_f64x2_extract_lane(vout0123456701234567, 0);
+        output += 8;
+        n -= 8 * sizeof(uint8_t);
+      } else {
+        if (n & (4 * sizeof(uint8_t))) {
+          *((uint32_t*) output) = (uint32_t) wasm_i32x4_extract_lane(vout0123456701234567, 0);
+          vout0123456701234567 = wasm_u64x2_shr(vout0123456701234567, 32);
+          output += 4;
+        }
+        if (n & (2 * sizeof(uint8_t))) {
+          *((uint16_t*) output) = (uint16_t) wasm_i16x8_extract_lane(vout0123456701234567, 0);
+          vout0123456701234567 = wasm_u32x4_shr(vout0123456701234567, 16);
+          output += 2;
+        }
+        if (n & (1 * sizeof(uint8_t))) {
+          *output = (uint8_t) wasm_i8x16_extract_lane(vout0123456701234567, 0);
+        }
+        n = 0;
+      }
+    } while (n != 0);
+  }
+}
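
The tail handling after the main loop follows the same pattern in every kernel here: the remainder's binary digits select the partial stores, with bit 2 writing four bytes, bit 1 two bytes, and bit 0 one byte, and the vector shifted down after each store so the next unwritten lanes sit at the front. A trivial sanity check of the byte accounting:

    #include <assert.h>
    #include <stddef.h>

    // Any remainder 1..7 is covered exactly by the 4/2/1 masked stores.
    static size_t tail_bytes_written(size_t n) {
      size_t written = 0;
      if (n & 4) written += 4;  // 32-bit lane store
      if (n & 2) written += 2;  // 16-bit lane store
      if (n & 1) written += 1;  // 8-bit lane store
      return written;
    }

    int main(void) {
      for (size_t n = 1; n < 8; n++) {
        assert(tail_bytes_written(n) == n);
      }
      return 0;
    }
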
diff --git a/src/qu8-vaddc/gen/minmax-wasmsimd-x8.c b/src/qu8-vaddc/gen/minmax-wasmsimd-x8.c
new file mode 100644
index 0000000..d86eee4
--- /dev/null
+++ b/src/qu8-vaddc/gen/minmax-wasmsimd-x8.c
@@ -0,0 +1,86 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-vaddc/wasmsimd.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/vadd.h>
+
+
+void xnn_qu8_vaddc_minmax_ukernel__wasmsimd_x8(
+    size_t n,
+    const uint8_t* input_a,
+    const uint8_t* input_b,
+    uint8_t* output,
+    const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  const v128_t va_multiplier = wasm_v128_load(params->wasmsimd.a_multiplier);
+  const v128_t vrounding = wasm_v128_load(params->wasmsimd.rounding);
+  const int32_t vshift = params->wasmsimd.shift;
+  const v128_t voutput_zero_point = wasm_v128_load(params->wasmsimd.output_zero_point);
+  const v128_t voutput_min = wasm_v128_load(params->wasmsimd.output_min);
+  const v128_t voutput_max = wasm_v128_load(params->wasmsimd.output_max);
+
+  v128_t vbias = wasm_i32x4_splat((int32_t) *input_b * params->wasmsimd.b_multiplier[0]);
+  vbias = wasm_i32x4_add(vbias, wasm_v128_load(params->wasmsimd.bias));
+
+  for (; n >= 8 * sizeof(uint8_t); n -= 8 * sizeof(uint8_t)) {
+    const v128_t va01234567 = wasm_u16x8_load8x8(input_a);
+    input_a += 8;
+
+    v128_t vacc0123 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_u32x4_extend_low_u16x8(va01234567), va_multiplier));
+    v128_t vacc4567 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_u32x4_extend_high_u16x8(va01234567), va_multiplier));
+
+    vacc0123 = wasm_i32x4_shr(wasm_i32x4_add(vacc0123, vrounding), vshift);
+    vacc4567 = wasm_i32x4_shr(wasm_i32x4_add(vacc4567, vrounding), vshift);
+
+    v128_t vout01234567 = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vacc0123, vacc4567), voutput_zero_point);
+
+    v128_t vout0123456701234567 = wasm_u8x16_narrow_i16x8(vout01234567, vout01234567);
+
+    vout0123456701234567 = wasm_u8x16_max(vout0123456701234567, voutput_min);
+
+    vout0123456701234567 = wasm_u8x16_min(vout0123456701234567, voutput_max);
+
+    *((double*) output) = wasm_f64x2_extract_lane(vout0123456701234567, 0);
+    output += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    {
+      const v128_t va01234567 = wasm_u16x8_load8x8(input_a);
+
+      v128_t vacc0123 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_u32x4_extend_low_u16x8(va01234567), va_multiplier));
+      v128_t vacc4567 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_u32x4_extend_high_u16x8(va01234567), va_multiplier));
+
+      vacc0123 = wasm_i32x4_shr(wasm_i32x4_add(vacc0123, vrounding), vshift);
+      vacc4567 = wasm_i32x4_shr(wasm_i32x4_add(vacc4567, vrounding), vshift);
+
+      v128_t vout01234567 = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vacc0123, vacc4567), voutput_zero_point);
+
+      v128_t vout0123456701234567 = wasm_u8x16_narrow_i16x8(vout01234567, vout01234567);
+      vout0123456701234567 = wasm_u8x16_max(vout0123456701234567, voutput_min);
+      vout0123456701234567 = wasm_u8x16_min(vout0123456701234567, voutput_max);
+
+      if (n & (4 * sizeof(uint8_t))) {
+        *((uint32_t*) output) = (uint32_t) wasm_i32x4_extract_lane(vout0123456701234567, 0);
+        vout0123456701234567 = wasm_u64x2_shr(vout0123456701234567, 32);
+        output += 4;
+      }
+      if (n & (2 * sizeof(uint8_t))) {
+        *((uint16_t*) output) = (uint16_t) wasm_i16x8_extract_lane(vout0123456701234567, 0);
+        vout0123456701234567 = wasm_u32x4_shr(vout0123456701234567, 16);
+        output += 2;
+      }
+      if (n & (1 * sizeof(uint8_t))) {
+        *output = (uint8_t) wasm_i8x16_extract_lane(vout0123456701234567, 0);
+      }
+    }
+  }
+}
diff --git a/src/xnnpack/params-init.h b/src/xnnpack/params-init.h
index c74b7a2..3952c91 100644
--- a/src/xnnpack/params-init.h
+++ b/src/xnnpack/params-init.h
@@ -496,15 +496,41 @@
   uint8_t output_min,
   uint8_t output_max);
 
-XNN_INTERNAL void xnn_init_qu8_add_minmax_params(
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+XNN_INTERNAL void xnn_init_qu8_add_minmax_sse2_params(
   union xnn_qu8_add_minmax_params params[XNN_MIN_ELEMENTS(1)],
   uint8_t a_zero_point,
   uint8_t b_zero_point,
   uint8_t output_zero_point,
   float a_output_scale,
   float b_output_scale,
   uint8_t output_min,
   uint8_t output_max);
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+XNN_INTERNAL void xnn_init_qu8_add_minmax_neon_params(
+  union xnn_qu8_add_minmax_params params[XNN_MIN_ELEMENTS(1)],
+  uint8_t a_zero_point,
+  uint8_t b_zero_point,
+  uint8_t output_zero_point,
+  float a_output_scale,
+  float b_output_scale,
+  uint8_t output_min,
+  uint8_t output_max);
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+#if XNN_ARCH_WASMSIMD
+XNN_INTERNAL void xnn_init_qu8_add_minmax_wasmsimd_params(
+  union xnn_qu8_add_minmax_params params[XNN_MIN_ELEMENTS(1)],
+  uint8_t a_zero_point,
+  uint8_t b_zero_point,
+  uint8_t output_zero_point,
+  float a_output_scale,
+  float b_output_scale,
+  uint8_t output_min,
+  uint8_t output_max);
+#endif  // XNN_ARCH_WASMSIMD
 
 XNN_INTERNAL void xnn_init_qu8_add_minmax_scalar_params(
   union xnn_qu8_add_minmax_params params[XNN_MIN_ELEMENTS(1)],
diff --git a/src/xnnpack/params.h b/src/xnnpack/params.h
index 071b07a..3089e3d 100644
--- a/src/xnnpack/params.h
+++ b/src/xnnpack/params.h
@@ -540,45 +540,54 @@
 
 union xnn_qu8_add_minmax_params {
   struct {
-    int32_t zero_point_product;
-    uint32_t a_multiplier;
-    uint32_t b_multiplier;
+    int32_t bias;
+    int32_t a_multiplier;
+    int32_t b_multiplier;
+    int32_t rounding;
     uint32_t shift;
-    int32_t remainder_mask;
-    int32_t remainder_threshold;
-    int32_t y_zero_point;
-    int32_t y_min;
-    int32_t y_max;
+    int32_t output_min_less_zero_point;
+    int32_t output_max_less_zero_point;
+    int32_t output_zero_point;
   } scalar;
 #if XNN_ARCH_ARM || XNN_ARCH_ARM64
   struct {
     uint8_t a_zero_point;
     uint8_t b_zero_point;
-    int16_t y_zero_point;
+    int16_t output_zero_point;
     int32_t a_multiplier;
     int32_t b_multiplier;
     int32_t right_shift;
-    uint8_t y_min;
-    uint8_t y_max;
+    uint8_t output_min;
+    uint8_t output_max;
   } neon;
 #endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
 #if XNN_ARCH_X86 || XNN_ARCH_X86_64
   struct {
-    XNN_ALIGN(16) int32_t zero_point_product[4];
+    XNN_ALIGN(16) int32_t bias[4];
     XNN_ALIGN(16) uint16_t a_multiplier_lo[8];
     XNN_ALIGN(16) uint16_t a_multiplier_hi[8];
     XNN_ALIGN(16) uint16_t b_multiplier_lo[8];
     XNN_ALIGN(16) uint16_t b_multiplier_hi[8];
-    XNN_ALIGN(16) int32_t remainder_mask[4];
-    XNN_ALIGN(16) int32_t remainder_threshold[4];
-    XNN_ALIGN(16) int16_t y_zero_point[8];
-    XNN_ALIGN(16) uint8_t y_min[16];
-    XNN_ALIGN(16) uint8_t y_max[16];
+    XNN_ALIGN(16) int32_t rounding[4];
     uint32_t shift;
-    uint32_t a_multiplier;
     uint32_t b_multiplier;
+    XNN_ALIGN(16) int16_t output_zero_point[8];
+    XNN_ALIGN(16) uint8_t output_min[16];
+    XNN_ALIGN(16) uint8_t output_max[16];
   } sse2;
 #endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+#if XNN_ARCH_WASMSIMD
+  struct {
+    XNN_ALIGN(16) int32_t bias[4];
+    XNN_ALIGN(16) int32_t a_multiplier[4];
+    XNN_ALIGN(16) int32_t b_multiplier[4];
+    XNN_ALIGN(16) int32_t rounding[4];
+    int32_t shift;
+    XNN_ALIGN(16) int16_t output_zero_point[8];
+    XNN_ALIGN(16) uint8_t output_min[16];
+    XNN_ALIGN(16) uint8_t output_max[16];
+  } wasmsimd;
+#endif  // XNN_ARCH_WASMSIMD
 };
 
 union xnn_qs8_add_minmax_params {
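
All fields the scalar kernels consume are precomputed. A sketch of how an init routine could fill the new scalar struct from already-derived integer multipliers and a shift; the field derivations here are inferred from what the kernels read, not quoted from the commit, and deriving the multipliers and shift from the float scales is omitted:

    #include <stdint.h>

    // Illustrative sketch (not the commit's init code); the struct mirrors
    // the scalar variant of xnn_qu8_add_minmax_params above.
    struct qu8_add_scalar_params {
      int32_t bias;
      int32_t a_multiplier;
      int32_t b_multiplier;
      int32_t rounding;
      uint32_t shift;
      int32_t output_min_less_zero_point;
      int32_t output_max_less_zero_point;
      int32_t output_zero_point;
    };

    static void init_scalar_params(
        struct qu8_add_scalar_params* p,
        uint8_t a_zero_point, uint8_t b_zero_point, uint8_t output_zero_point,
        int32_t a_multiplier, int32_t b_multiplier, uint32_t shift,
        uint8_t output_min, uint8_t output_max)
    {
      // Folding the zero points: acc = bias + a*a_mul + b*b_mul must equal
      // (a - a_zp)*a_mul + (b - b_zp)*b_mul.
      p->bias = -(a_multiplier * (int32_t) a_zero_point +
                  b_multiplier * (int32_t) b_zero_point);
      p->a_multiplier = a_multiplier;
      p->b_multiplier = b_multiplier;
      p->rounding = INT32_C(1) << (shift - 1);  // round to nearest
      p->shift = shift;
      p->output_min_less_zero_point = (int32_t) output_min - (int32_t) output_zero_point;
      p->output_max_less_zero_point = (int32_t) output_max - (int32_t) output_zero_point;
      p->output_zero_point = (int32_t) output_zero_point;
    }
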
diff --git a/src/xnnpack/requantization.h b/src/xnnpack/requantization.h
index e3d8d5f..5ba6d3d 100644
--- a/src/xnnpack/requantization.h
+++ b/src/xnnpack/requantization.h
@@ -412,31 +412,23 @@
   union xnn_qu8_add_minmax_params params)
 {
   // Multiply by factors and accumulate products.
-  int32_t acc = params.scalar.zero_point_product +
-    (int32_t) ((uint32_t) a * params.scalar.a_multiplier) +
-    (int32_t) ((uint32_t) b * params.scalar.b_multiplier);
+  int32_t acc = params.scalar.bias + (int32_t) a * params.scalar.a_multiplier + (int32_t) b * params.scalar.b_multiplier;
 
-  // Shift right and round.
-  const int32_t rem = (acc & params.scalar.remainder_mask) - (int32_t) (acc < 0);
-  acc = asr_s32(acc, params.scalar.shift) + (int32_t) (rem > params.scalar.remainder_threshold);
+  // Shift right with rounding away from zero.
+  acc = asr_s32(acc + params.scalar.rounding, params.scalar.shift);
 
   // Clamp and add output zero point.
-  int32_t y = acc + params.scalar.y_zero_point;
-  if (y >= params.scalar.y_max) {
-    y = params.scalar.y_max;
-  }
-  if (y <= params.scalar.y_min) {
-    y = params.scalar.y_min;
-  }
-  return (uint8_t) y;
+  acc = math_max_s32(acc, params.scalar.output_min_less_zero_point);
+  acc = math_min_s32(acc, params.scalar.output_max_less_zero_point);
+  return (uint8_t) ((int32_t) acc + params.scalar.output_zero_point);
 }
 
 static inline int8_t xnn_qs8_quantize_add(
-  int8_t x, int8_t y,
+  int8_t a, int8_t b,
   union xnn_qs8_add_minmax_params params)
 {
   // Multiply by factors and accumulate products.
-  int32_t acc = params.scalar.bias + (int32_t) x * params.scalar.a_multiplier + (int32_t) y * params.scalar.b_multiplier;
+  int32_t acc = params.scalar.bias + (int32_t) a * params.scalar.a_multiplier + (int32_t) b * params.scalar.b_multiplier;
 
   // Shift right with rounding away from zero.
   acc = asr_s32(acc + params.scalar.rounding, params.scalar.shift);
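
The vadd.h changes below declare the complete QU8 VADD and VADDC kernel set. A minimal sketch of driving one of the VADDC kernels directly, assuming xnn_init_qu8_add_minmax_scalar_params takes the same argument list as the per-arch init routines above; all numeric values are hypothetical:

    #include <stdint.h>

    #include <xnnpack/params.h>
    #include <xnnpack/params-init.h>
    #include <xnnpack/vadd.h>

    void example(void) {
      uint8_t a[20] = {0};
      uint8_t y[20];
      const uint8_t b = 42;  // the single broadcast addend

      union xnn_qu8_add_minmax_params params;
      xnn_init_qu8_add_minmax_scalar_params(&params,
        /*a_zero_point=*/128, /*b_zero_point=*/128, /*output_zero_point=*/128,
        /*a_output_scale=*/0.5f, /*b_output_scale=*/0.5f,
        /*output_min=*/0, /*output_max=*/255);

      // n is a byte count; the scalar kernels accept any n > 0.
      xnn_qu8_vaddc_minmax_ukernel__scalar_x4(20 * sizeof(uint8_t), a, &b, y, &params);
    }
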
diff --git a/src/xnnpack/vadd.h b/src/xnnpack/vadd.h
index cd122fc..5f74dda 100644
--- a/src/xnnpack/vadd.h
+++ b/src/xnnpack/vadd.h
@@ -27,9 +27,67 @@
       uint8_t* output,                                    \
       const union xnn_qu8_add_minmax_params* params);
 
-DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vadd_minmax_ukernel__neon_x32)
+DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vadd_minmax_ukernel__neon_ld64_x8)
+DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vadd_minmax_ukernel__neon_ld64_x16)
+
+DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vadd_minmax_ukernel__sse2_mul16_ld64_x8)
+DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vadd_minmax_ukernel__sse2_mul16_ld64_x16)
+
+DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vadd_minmax_ukernel__sse41_mul16_ld64_x8)
+DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vadd_minmax_ukernel__sse41_mul16_ld64_x16)
+
+DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vadd_minmax_ukernel__avx_mul16_ld64_x8)
+DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vadd_minmax_ukernel__avx_mul16_ld64_x16)
+
+DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vadd_minmax_ukernel__sse41_mul32_ld32_x8)
+DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vadd_minmax_ukernel__sse41_mul32_ld32_x16)
+
+DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vadd_minmax_ukernel__avx_mul32_ld32_x8)
+DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vadd_minmax_ukernel__avx_mul32_ld32_x16)
+
+DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vadd_minmax_ukernel__xop_mul32_ld32_x8)
+DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vadd_minmax_ukernel__xop_mul32_ld32_x16)
+
+DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vadd_minmax_ukernel__avx2_mul32_ld64_x8)
+DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vadd_minmax_ukernel__avx2_mul32_ld64_x16)
+
+DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vadd_minmax_ukernel__wasmsimd_x8)
+DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vadd_minmax_ukernel__wasmsimd_x16)
+
 DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vadd_minmax_ukernel__scalar_x1)
-DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vadd_minmax_ukernel__sse2_x8)
+DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vadd_minmax_ukernel__scalar_x2)
+DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vadd_minmax_ukernel__scalar_x4)
+
+DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vaddc_minmax_ukernel__neon_ld64_x8)
+DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vaddc_minmax_ukernel__neon_ld64_x16)
+
+DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vaddc_minmax_ukernel__sse2_mul16_ld64_x8)
+DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vaddc_minmax_ukernel__sse2_mul16_ld64_x16)
+
+DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vaddc_minmax_ukernel__sse41_mul16_ld64_x8)
+DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vaddc_minmax_ukernel__sse41_mul16_ld64_x16)
+
+DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vaddc_minmax_ukernel__avx_mul16_ld64_x8)
+DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vaddc_minmax_ukernel__avx_mul16_ld64_x16)
+
+DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vaddc_minmax_ukernel__sse41_mul32_ld32_x8)
+DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vaddc_minmax_ukernel__sse41_mul32_ld32_x16)
+
+DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vaddc_minmax_ukernel__avx_mul32_ld32_x8)
+DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vaddc_minmax_ukernel__avx_mul32_ld32_x16)
+
+DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vaddc_minmax_ukernel__xop_mul32_ld32_x8)
+DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vaddc_minmax_ukernel__xop_mul32_ld32_x16)
+
+DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vaddc_minmax_ukernel__avx2_mul32_ld64_x8)
+DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vaddc_minmax_ukernel__avx2_mul32_ld64_x16)
+
+DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vaddc_minmax_ukernel__wasmsimd_x8)
+DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vaddc_minmax_ukernel__wasmsimd_x16)
+
+DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vaddc_minmax_ukernel__scalar_x1)
+DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vaddc_minmax_ukernel__scalar_x2)
+DECLARE_QU8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qu8_vaddc_minmax_ukernel__scalar_x4)
 
 
 #define DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(fn_name) \