// Auto-generated file. Do not edit!
//   Template: src/x32-transpose/scalar.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.


#include <assert.h>

#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
void xnn_x8_transpose_ukernel__4x4_scalar_int(
    const uint8_t *input,
    uint8_t * output,
    size_t input_stride,
    size_t output_stride,
    size_t block_width,
    size_t block_height)
{
  // Transposes a block_height x block_width matrix of bytes (input rows
  // spaced input_stride bytes apart) into a block_width x block_height
  // matrix (output rows spaced output_stride bytes apart), one 4x4 tile
  // at a time.
  //
  // NOTE(review): tile loads use fixed column offsets [0..3], so each input
  // row may be read up to 4 bytes past block_width on the last column pass;
  // callers are expected to provide readable padding.  The generated
  // original annotated the width branches with XNN_UNPREDICTABLE; that hint
  // affects codegen only and has no observable semantics.
  assert(output_stride >= block_height * sizeof(int8_t));
  assert(input_stride >= block_width * sizeof(int8_t));

  enum { TILE = 4 };  // tile_height == tile_width == 4

  // Per-pass rewind amounts.  The subtractions may wrap (size_t arithmetic
  // is modular); the wrap cancels out exactly when the value is later added
  // to a pointer through uintptr_t.  The masks are the inlined equivalent of
  // round_down_po2(block_height, 4) and round_down_po2(block_height, 2).
  const size_t input_rewind = TILE * sizeof(int8_t) - (block_height & ~(size_t) (TILE - 1)) * input_stride;
  const size_t output_rewind = TILE * output_stride - (block_height & ~(size_t) 1) * sizeof(int8_t);
  const size_t input_advance = TILE * input_stride;  // step down to the next row tile

  // Four consecutive input rows and four consecutive output rows.
  const int8_t* in_row[TILE];
  in_row[0] = (const int8_t*) input;
  for (size_t k = 1; k < TILE; k++) {
    in_row[k] = (const int8_t*) ((uintptr_t) in_row[k - 1] + input_stride);
  }
  int8_t* out_row[TILE];
  out_row[0] = (int8_t*) output;
  for (size_t k = 1; k < TILE; k++) {
    out_row[k] = (int8_t*) ((uintptr_t) out_row[k - 1] + output_stride);
  }

  do {
    // With fewer than 4 output rows left, alias the surplus output pointers
    // to row 0: their stores land on row 0 and are overwritten by the row-0
    // stores, which are always issued last.
    if (block_width < 2) {
      out_row[1] = out_row[0];
    }
    if (block_width <= 2) {
      out_row[2] = out_row[0];
    }
    if (block_width < 4) {
      out_row[3] = out_row[0];
    }

    size_t rows_left = block_height;
    for (; rows_left >= 4; rows_left -= 4) {
      // Scatter one full 4x4 tile, highest output row first so that the
      // (possibly aliased) row-0 stores win.
      for (size_t j = TILE; j-- != 0;) {
        for (size_t r = 0; r < TILE; r++) {
          *out_row[j]++ = in_row[r][j];
        }
      }
      for (size_t r = 0; r < TILE; r++) {
        in_row[r] = (const int8_t*) ((uintptr_t) in_row[r] + input_advance);
      }
    }

    // Tail: up to 3 leftover input rows — a 2-row chunk, then a single row.
    const int8_t* last = in_row[0];
    if (rows_left & 2) {
      for (size_t j = TILE; j-- != 0;) {
        out_row[j][0] = in_row[0][j];
        out_row[j][1] = in_row[1][j];
        out_row[j] += 2;
      }
      last = in_row[2];
    }
    if (rows_left & 1) {
      for (size_t j = TILE; j-- != 0;) {
        out_row[j][0] = last[j];
      }
    }

    // Rewind input to the top of the next 4-column tile and advance each
    // output pointer down by 4 output rows.
    in_row[0] = (const int8_t*) ((uintptr_t) in_row[0] + input_rewind);
    for (size_t k = 1; k < TILE; k++) {
      in_row[k] = (const int8_t*) ((uintptr_t) in_row[k - 1] + input_stride);
    }
    for (size_t k = 0; k < TILE; k++) {
      out_row[k] = (int8_t*) ((uintptr_t) out_row[k] + output_rewind);
    }
    // Inlined doz(block_width, tile_width): saturating subtract.
    block_width = block_width > TILE ? block_width - TILE : 0;
  } while (block_width != 0);
}