/*
 * McKinley-optimized version of copy_page().
 *
 * Copyright (C) 2002 Hewlett-Packard Co
 *	David Mosberger <davidm@hpl.hp.com>
 *
 * Inputs:
 *	in0:	address of target page
 *	in1:	address of source page
 * Output:
 *	no return value
 *
 * General idea:
 *	- use regular loads and stores to prefetch data to avoid consuming M-slot just for
 *	  lfetches => good for in-cache performance
 *	- avoid L2 bank-conflicts by not storing into the same 16-byte bank within a single
 *	  cycle
 *
 * Principle of operation:
 *	First, note that L1 has a line-size of 64 bytes and L2 a line-size of 128 bytes.
 *	To avoid secondary misses in L2, we prefetch both source and destination with a line-size
 *	of 128 bytes.  When both of these lines are in the L2 and the first half of the
 *	source line is in L1, we start copying the remaining words.  The second half of the
 *	source line is prefetched in an earlier iteration, so that by the time we start
 *	accessing it, it's also present in the L1.
 *
 *	We use a software-pipelined loop to control the overall operation.  The pipeline
 *	has 2*PREFETCH_DIST+K stages.  The first PREFETCH_DIST stages are used for prefetching
 *	source cache-lines.  The second PREFETCH_DIST stages are used for prefetching destination
 *	cache-lines, and the last K stages are used to copy the cache-line words not copied by
 *	the prefetches.  The four relevant points in the pipeline are called A, B, C, D:
 *	p[A] is TRUE if a source-line should be prefetched, p[B] is TRUE if a destination-line
 *	should be prefetched, p[C] is TRUE if the second half of an L2 line should be brought
 *	into L1D and p[D] is TRUE if a cache-line needs to be copied.
 *
 *	This all sounds very complicated, but thanks to the modulo-scheduled loop support,
 *	the resulting code is very regular and quite easy to follow (once you get the idea).
 *
 *	As a secondary optimization, the first 2*PREFETCH_DIST iterations are implemented
 *	as the separate .prefetch_loop.  Logically, this loop performs exactly like the
 *	main loop (.line_copy), but has all known-to-be-predicated-off instructions removed,
 *	so that each loop iteration is faster (again, good for the cached case).
 *
 *	When reading the code, it helps to keep the following picture in mind:
 *
 *	       word 0	word 1
 *	      +------+------+---
 *	      | v[x] |  t1  | ^
 *	      | t2   |  t3  | |
 *	      | t4   |  t5  | |
 *	      | t6   |  t7  | | 128 bytes
 *	      | n[y] |  t9  | | (L2 cache line)
 *	      | t10  |  t11 | |
 *	      | t12  |  t13 | |
 *	      | t14  |  t15 | v
 *	      +------+------+---
 *
 * Here, v[x] is copied by the (memory) prefetch.  n[y] is loaded at p[C]
 * to fetch the second half of the L2 cache line into L1, and the tX words are copied in
 * an order that avoids bank conflicts.
 */
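/*
 * For orientation only, a rough C-level sketch of the overall operation
 * described above (a hedged approximation, not the actual algorithm: the
 * real code below interleaves the prefetches and the word copies in a
 * single modulo-scheduled loop, copy_page_sketch is a made-up name, and
 * the 16KB page size is just one of the page sizes ia64 supports):
 *
 *	typedef unsigned long u64;
 *
 *	void copy_page_sketch(u64 *dst, const u64 *src)
 *	{
 *		const unsigned long words_per_line = 128 / sizeof(u64);
 *		const unsigned long nlines = 16384 / 128;	// 16KB page assumed
 *		unsigned long line, w;
 *
 *		for (line = 0; line < nlines; line++) {
 *			// the real code also prefetches the source and destination
 *			// lines PREFETCH_DIST lines ahead of this point
 *			for (w = 0; w < words_per_line; w++)
 *				dst[line * words_per_line + w] =
 *					src[line * words_per_line + w];
 *		}
 *	}
 */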
#include <asm/asmmacro.h>
#include <asm/page.h>
#include <asm/export.h>

#define PREFETCH_DIST	8		// McKinley sustains 16 outstanding L2 misses (8 ld, 8 st)

#define src0		r2
#define src1		r3
#define dst0		r9
#define dst1		r10
#define src_pre_mem	r11
#define dst_pre_mem	r14
#define src_pre_l2	r15
#define dst_pre_l2	r16
#define t1		r17
#define t2		r18
#define t3		r19
#define t4		r20
#define t5		t1	// alias!
#define t6		t2	// alias!
#define t7		t3	// alias!
#define t9		t5	// alias!
#define t10		t4	// alias!
#define t11		t7	// alias!
#define t12		t6	// alias!
#define t14		t10	// alias!
#define t13		r21
#define t15		r22

#define saved_lc	r23
#define saved_pr	r24

#define	A	0
#define B	(PREFETCH_DIST)
#define C	(B + PREFETCH_DIST)
#define D	(C + 3)
#define N	(D + 1)
#define Nrot	((N + 7) & ~7)
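// With the default PREFETCH_DIST of 8 above, these work out to A=0, B=8, C=16,
// D=19, N=20 pipeline stages, and Nrot=24 (N rounded up to a multiple of 8,
// since rotating registers are allocated in groups of 8).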

GLOBAL_ENTRY(copy_page)
	.prologue
	alloc r8 = ar.pfs, 2, Nrot-2, 0, Nrot

	.rotr v[2*PREFETCH_DIST], n[D-C+1]
	.rotp p[N]
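	// v[] carries the words fetched by the memory-prefetch loads (2*PREFETCH_DIST
	// rotating registers), n[] the words fetched by the L2-prefetch loads (D-C+1
	// registers), and p[] the N rotating predicates that gate the pipeline stages.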

	.save ar.lc, saved_lc
	mov saved_lc = ar.lc
	.save pr, saved_pr
	mov saved_pr = pr
	.body

	mov src_pre_mem = in1
	mov pr.rot = 0x10000
	mov ar.ec = 1				// special unrolled loop

	mov dst_pre_mem = in0
	mov ar.lc = 2*PREFETCH_DIST - 1

	add src_pre_l2 = 8*8, in1
	add dst_pre_l2 = 8*8, in0
	add src0 = 8, in1			// first t1 src
	add src1 = 3*8, in1			// first t3 src
	add dst0 = 8, in0			// first t1 dst
	add dst1 = 3*8, in0			// first t3 dst
	mov t1 = (PAGE_SIZE/128) - (2*PREFETCH_DIST) - 1
	nop.m 0
	nop.i 0
	;;
	// same as .line_copy loop, but with all predicated-off instructions removed:
.prefetch_loop:
(p[A])	ld8 v[A] = [src_pre_mem], 128		// M0
(p[B])	st8 [dst_pre_mem] = v[B], 128		// M2
	br.ctop.sptk .prefetch_loop
	;;
	cmp.eq p16, p0 = r0, r0		// reset p16 to 1 (br.ctop cleared it to zero)
	mov ar.lc = t1			// with 64KB pages, t1 is too big to fit in 8 bits!
	mov ar.ec = N			// # of stages in pipeline
	;;
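	// Worked example, assuming the common 16KB page size: PAGE_SIZE/128 = 128
	// L2 lines per page, so the main loop starts with ar.lc = 128 - 2*8 - 1 = 111
	// (with 64KB pages the count would be 495, hence the note above about not
	// fitting in 8 bits).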
.line_copy:
(p[D])	ld8 t2 = [src0], 3*8			// M0
(p[D])	ld8 t4 = [src1], 3*8			// M1
(p[B])	st8 [dst_pre_mem] = v[B], 128		// M2 prefetch dst from memory
(p[D])	st8 [dst_pre_l2] = n[D-C], 128		// M3 prefetch dst from L2
	;;
(p[A])	ld8 v[A] = [src_pre_mem], 128		// M0 prefetch src from memory
(p[C])	ld8 n[0] = [src_pre_l2], 128		// M1 prefetch src from L2
(p[D])	st8 [dst0] = t1, 8			// M2
(p[D])	st8 [dst1] = t3, 8			// M3
	;;
(p[D])	ld8 t5 = [src0], 8
(p[D])	ld8 t7 = [src1], 3*8
(p[D])	st8 [dst0] = t2, 3*8
(p[D])	st8 [dst1] = t4, 3*8
	;;
(p[D])	ld8 t6 = [src0], 3*8
(p[D])	ld8 t10 = [src1], 8
(p[D])	st8 [dst0] = t5, 8
(p[D])	st8 [dst1] = t7, 3*8
	;;
(p[D])	ld8 t9 = [src0], 3*8
(p[D])	ld8 t11 = [src1], 3*8
(p[D])	st8 [dst0] = t6, 3*8
(p[D])	st8 [dst1] = t10, 8
	;;
(p[D])	ld8 t12 = [src0], 8
(p[D])	ld8 t14 = [src1], 8
(p[D])	st8 [dst0] = t9, 3*8
(p[D])	st8 [dst1] = t11, 3*8
	;;
(p[D])	ld8 t13 = [src0], 4*8
(p[D])	ld8 t15 = [src1], 4*8
(p[D])	st8 [dst0] = t12, 8
(p[D])	st8 [dst1] = t14, 8
	;;
(p[D-1])ld8  t1 = [src0], 8
(p[D-1])ld8  t3 = [src1], 8
(p[D])	st8 [dst0] = t13, 4*8
(p[D])	st8 [dst1] = t15, 4*8
	br.ctop.sptk .line_copy
	;;
	mov ar.lc = saved_lc
	mov pr = saved_pr, -1
	br.ret.sptk.many rp
END(copy_page)
EXPORT_SYMBOL(copy_page)