Refactor pre-SSE4 versions of QS8/QC8 GEMM/IGEMM microkernels

- Replace the 8-to-16-bit sign-extension sequence with a more efficient one (unpack + arithmetic shift instead of a compare-generated sign mask; see the first sketch below)
- Replace casts to uintptr_t with casts to typed pointer types where possible (see the second sketch below)
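
A minimal standalone sketch of the two sign-extension idioms, assuming SSE2 only;
the helper names are illustrative and do not exist in the kernels:

  #include <emmintrin.h>  // SSE2 intrinsics

  // Old sequence: build a sign mask by comparing against zero, then
  // interleave it with the low 8 bytes.
  static inline __m128i sext8x8_cmpgt(__m128i v) {
    return _mm_unpacklo_epi8(v, _mm_cmpgt_epi8(_mm_setzero_si128(), v));
  }

  // New sequence: duplicate each of the low 8 bytes into a 16-bit lane,
  // then arithmetic-shift right by 8 so the sign bit fills the high byte.
  static inline __m128i sext8x8_srai(__m128i v) {
    return _mm_srai_epi16(_mm_unpacklo_epi8(v, v), 8);
  }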
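
Likewise, a hedged sketch of the pointer-advance rewrite: stepping a const void*
weights pointer past 4 int32_t values, first via uintptr_t byte arithmetic, then
via a typed pointer; the function names here are hypothetical:

  #include <stdint.h>

  // Old style: cast to uintptr_t and add a byte count.
  static const void* advance_by_bytes(const void* w) {
    return (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
  }

  // New style: cast to a typed pointer and add an element count.
  static const void* advance_by_elements(const void* w) {
    return (const void*) ((const int32_t*) w + 4);
  }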

PiperOrigin-RevId: 382654408
diff --git a/src/qc8-igemm/gen/4x4c2-minmax-fp32-sse41-ld64.c b/src/qc8-igemm/gen/4x4c2-minmax-fp32-sse41-ld64.c
index c4f0590..73c236c 100644
--- a/src/qc8-igemm/gen/4x4c2-minmax-fp32-sse41-ld64.c
+++ b/src/qc8-igemm/gen/4x4c2-minmax-fp32-sse41-ld64.c
@@ -60,7 +60,7 @@
     __m128i vacc1x0123 = vacc0x0123;
     __m128i vacc2x0123 = vacc0x0123;
     __m128i vacc3x0123 = vacc0x0123;
-    w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    w = (const void*) ((const int32_t*) w + 4);
 
     size_t p = ks;
     do {
@@ -108,7 +108,7 @@
           _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
         vacc3x0123 = _mm_add_epi32(vacc3x0123,
           _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
-        const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8));
+        const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
         const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
 
         vacc0x0123 = _mm_add_epi32(vacc0x0123,
@@ -119,7 +119,7 @@
           _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
         vacc3x0123 = _mm_add_epi32(vacc3x0123,
           _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
-        const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16));
+        const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
         const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
 
         vacc0x0123 = _mm_add_epi32(vacc0x0123,
@@ -130,7 +130,7 @@
           _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
         vacc3x0123 = _mm_add_epi32(vacc3x0123,
           _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
-        const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24));
+        const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
         const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);
 
         vacc0x0123 = _mm_add_epi32(vacc0x0123,
@@ -142,7 +142,7 @@
         vacc3x0123 = _mm_add_epi32(vacc3x0123,
           _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
 
-        w = (const void*) ((uintptr_t) w + 32);
+        w = (const void*) ((const int8_t*) w + 32);
         k -= 8 * sizeof(int8_t);
       }
       if (k != 0) {
@@ -160,8 +160,8 @@
         a3 = (const int8_t*) ((uintptr_t) a3 + k);
 
         const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
-        w = (const void*) ((uintptr_t) w + 8);
-        const __m128i vxb0 = _mm_unpacklo_epi8(vb0, _mm_cmpgt_epi8(_mm_setzero_si128(), vb0));
+        w = (const void*) ((const int8_t*) w + 8);
+        const __m128i vxb0 = _mm_srai_epi16(_mm_unpacklo_epi8(vb0, vb0), 8);
 
         vacc0x0123 = _mm_add_epi32(vacc0x0123,
           _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
@@ -174,8 +174,8 @@
 
         if (k > 2 * sizeof(int8_t)) {
           const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
-          w = (const void*) ((uintptr_t) w + 8);
-          const __m128i vxb1 = _mm_unpacklo_epi8(vb1, _mm_cmpgt_epi8(_mm_setzero_si128(), vb1));
+          w = (const void*) ((const int8_t*) w + 8);
+          const __m128i vxb1 = _mm_srai_epi16(_mm_unpacklo_epi8(vb1, vb1), 8);
 
           vacc0x0123 = _mm_add_epi32(vacc0x0123,
             _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
@@ -188,8 +188,8 @@
 
           if (k > 4 * sizeof(int8_t)) {
             const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
-            w = (const void*) ((uintptr_t) w + 8);
-            const __m128i vxb2 = _mm_unpacklo_epi8(vb2, _mm_cmpgt_epi8(_mm_setzero_si128(), vb2));
+            w = (const void*) ((const int8_t*) w + 8);
+            const __m128i vxb2 = _mm_srai_epi16(_mm_unpacklo_epi8(vb2, vb2), 8);
 
             vacc0x0123 = _mm_add_epi32(vacc0x0123,
               _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
@@ -211,7 +211,7 @@
     __m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123);
 
     const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
-    w = (const void*) ((uintptr_t) w + 4 * sizeof(float));
+    w = (const void*) ((const float*) w + 4);
     vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
     vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123);
     vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale0123);