Use RNDNU requantization in QS8 VADD[C] microkernels

Rounding midpoints up (RNDNU) requantization is more efficient than the previously
used RNDNA (rounding midpoints away from zero) requantization: it needs no
sign-dependent adjustment of the accumulator before the arithmetic shift.

PiperOrigin-RevId: 385867165
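
For illustration only (not part of the patch), below is a minimal scalar C sketch
of the two requantization roundings on a single int32_t accumulator. The names
requantize_rndna, requantize_rndnu, acc, and shift are hypothetical; the SIMD
kernels in this change implement the same arithmetic with _mm_add_epi32 /
_mm_sra_epi32, and the removed _mm_cmpgt_epi32 mask corresponds to the
sign-dependent adjustment in the RNDNA variant. The sketch assumes arithmetic
right shift of negative int32_t values, as produced by mainstream compilers.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* RNDNA: round to nearest, ties away from zero.
   Negative accumulators need a -1 adjustment before the shift
   (the role of the removed _mm_cmpgt_epi32 mask). */
static int32_t requantize_rndna(int32_t acc, uint32_t shift) {
  assert(shift >= 1 && shift < 32);
  const int32_t rounding = INT32_C(1) << (shift - 1);
  const int32_t adjustment = (acc < 0) ? -1 : 0;
  return (acc + rounding + adjustment) >> shift;
}

/* RNDNU: round to nearest, ties up (toward +infinity).
   Just add the rounding constant and shift; no sign-dependent work. */
static int32_t requantize_rndnu(int32_t acc, uint32_t shift) {
  assert(shift >= 1 && shift < 32);
  const int32_t rounding = INT32_C(1) << (shift - 1);
  return (acc + rounding) >> shift;
}

int main(void) {
  /* The two schemes differ only on negative midpoints, e.g. -6 / 4 = -1.5: */
  printf("RNDNA(-6 >> 2) = %d\n", requantize_rndna(-6, 2));  /* -2 (away from zero) */
  printf("RNDNU(-6 >> 2) = %d\n", requantize_rndnu(-6, 2));  /* -1 (up) */
  return 0;
}

Dropping the adjustment removes one comparison and one addition per vector of
accumulators, which is where the efficiency gain in the kernels below comes from.
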
diff --git a/src/qs8-vaddc/gen/minmax-sse41-mul32-ld32-x32.c b/src/qs8-vaddc/gen/minmax-sse41-mul32-ld32-x32.c
index 828f1b4..55eb457 100644
--- a/src/qs8-vaddc/gen/minmax-sse41-mul32-ld32-x32.c
+++ b/src/qs8-vaddc/gen/minmax-sse41-mul32-ld32-x32.c
@@ -53,31 +53,14 @@
     __m128i vaccOPQR = _mm_add_epi32(vbias, _mm_mullo_epi32(vaOPQR, va_multiplier));
     __m128i vaccSTUV = _mm_add_epi32(vbias, _mm_mullo_epi32(vaSTUV, va_multiplier));
 
-    const __m128i vadj0123 = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc0123);
-    vacc0123 = _mm_add_epi32(vacc0123, vrounding);
-    const __m128i vadj4567 = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc4567);
-    vacc4567 = _mm_add_epi32(vacc4567, vrounding);
-    const __m128i vadj89AB = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc89AB);
-    vacc89AB = _mm_add_epi32(vacc89AB, vrounding);
-    const __m128i vadjCDEF = _mm_cmpgt_epi32(_mm_setzero_si128(), vaccCDEF);
-    vaccCDEF = _mm_add_epi32(vaccCDEF, vrounding);
-    const __m128i vadjGHIJ = _mm_cmpgt_epi32(_mm_setzero_si128(), vaccGHIJ);
-    vaccGHIJ = _mm_add_epi32(vaccGHIJ, vrounding);
-    const __m128i vadjKLMN = _mm_cmpgt_epi32(_mm_setzero_si128(), vaccKLMN);
-    vaccKLMN = _mm_add_epi32(vaccKLMN, vrounding);
-    const __m128i vadjOPQR = _mm_cmpgt_epi32(_mm_setzero_si128(), vaccOPQR);
-    vaccOPQR = _mm_add_epi32(vaccOPQR, vrounding);
-    const __m128i vadjSTUV = _mm_cmpgt_epi32(_mm_setzero_si128(), vaccSTUV);
-    vaccSTUV = _mm_add_epi32(vaccSTUV, vrounding);
-
-    vacc0123 = _mm_sra_epi32(_mm_add_epi32(vacc0123, vadj0123), vshift);
-    vacc4567 = _mm_sra_epi32(_mm_add_epi32(vacc4567, vadj4567), vshift);
-    vacc89AB = _mm_sra_epi32(_mm_add_epi32(vacc89AB, vadj89AB), vshift);
-    vaccCDEF = _mm_sra_epi32(_mm_add_epi32(vaccCDEF, vadjCDEF), vshift);
-    vaccGHIJ = _mm_sra_epi32(_mm_add_epi32(vaccGHIJ, vadjGHIJ), vshift);
-    vaccKLMN = _mm_sra_epi32(_mm_add_epi32(vaccKLMN, vadjKLMN), vshift);
-    vaccOPQR = _mm_sra_epi32(_mm_add_epi32(vaccOPQR, vadjOPQR), vshift);
-    vaccSTUV = _mm_sra_epi32(_mm_add_epi32(vaccSTUV, vadjSTUV), vshift);
+    vacc0123 = _mm_sra_epi32(_mm_add_epi32(vacc0123, vrounding), vshift);
+    vacc4567 = _mm_sra_epi32(_mm_add_epi32(vacc4567, vrounding), vshift);
+    vacc89AB = _mm_sra_epi32(_mm_add_epi32(vacc89AB, vrounding), vshift);
+    vaccCDEF = _mm_sra_epi32(_mm_add_epi32(vaccCDEF, vrounding), vshift);
+    vaccGHIJ = _mm_sra_epi32(_mm_add_epi32(vaccGHIJ, vrounding), vshift);
+    vaccKLMN = _mm_sra_epi32(_mm_add_epi32(vaccKLMN, vrounding), vshift);
+    vaccOPQR = _mm_sra_epi32(_mm_add_epi32(vaccOPQR, vrounding), vshift);
+    vaccSTUV = _mm_sra_epi32(_mm_add_epi32(vaccSTUV, vrounding), vshift);
 
     __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
     __m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
@@ -110,13 +93,8 @@
       __m128i vacc0123 = _mm_add_epi32(vbias, _mm_mullo_epi32(va0123, va_multiplier));
       __m128i vacc4567 = _mm_add_epi32(vbias, _mm_mullo_epi32(va4567, va_multiplier));
 
-      const __m128i vadj0123 = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc0123);
-      const __m128i vadj4567 = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc4567);
-      vacc0123 = _mm_add_epi32(vacc0123, vrounding);
-      vacc4567 = _mm_add_epi32(vacc4567, vrounding);
-
-      vacc0123 = _mm_sra_epi32(_mm_add_epi32(vacc0123, vadj0123), vshift);
-      vacc4567 = _mm_sra_epi32(_mm_add_epi32(vacc4567, vadj4567), vshift);
+      vacc0123 = _mm_sra_epi32(_mm_add_epi32(vacc0123, vrounding), vshift);
+      vacc4567 = _mm_sra_epi32(_mm_add_epi32(vacc4567, vrounding), vshift);
 
       __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
       vout01234567 = _mm_max_epi16(vout01234567, voutput_min);