/*
 *
 * Optimized version of the standard strlen() function
 *
 *
 * Inputs:
 *	in0	address of string
 *
 * Outputs:
 *	ret0	the number of characters in the string (0 if empty string)
 *		does not count the \0
 *
 * Copyright (C) 1999, 2001 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 *
 * 09/24/99 S.Eranian add speculation recovery code
 */

#include <asm/asmmacro.h>

//
//
// This is an enhanced version of the basic strlen. It includes a combination
// of compute zero index (czx), parallel comparisons, speculative loads and
// loop unrolling using rotating registers.
//
// General ideas about the algorithm:
// The goal is to look at the string in chunks of 8 bytes.
// So we need to do a few extra checks at the beginning because the
// string may not be 8-byte aligned. In this case we load the 8-byte
// quantity which includes the start of the string and mask the unused
// bytes with 0xff to avoid confusing czx.
// We use speculative loads and software pipelining to hide memory
// latency and do read ahead safely. This way we defer any exception.
//
// Because we don't want the kernel to be relying on particular
// settings of the DCR register, we provide recovery code in case
// speculation fails. The recovery code is going to "redo" the work using
// only normal loads. If we still get a fault then we generate a
// kernel panic. Otherwise we return the strlen as usual.
//
// Speculation may fail, for instance, because the DCR.dm bit is set.
// In this case TLB misses are deferred, i.e., a NaT bit will be set
// if the translation is not present. The normal load, on the other
// hand, will cause the translation to be inserted if the mapping
// exists.
//
// It should be noted that we execute recovery code only when we need
// to use the data that has been speculatively loaded: we don't execute
// recovery code on pure read-ahead data.
//
// Remarks:
// - the cmp r0,r0 is used as a fast way to initialize a predicate
//   register to 1. This is required to make sure that we get the parallel
//   compare correct.
//
// - we don't use the epilogue counter to exit the loop but we need to set
//   it to zero beforehand.
//
// - after the loop we must test for NaT values because neither the
//   czx nor the cmp instruction raises a NaT consumption fault. We must
//   be careful not to look for a NaT we don't care about. For instance
//   we don't need to look at a NaT in val2 if the zero byte was in val1.
//
// - clearly performance tuning is required.
//
//
//
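// For orientation, here is a rough C sketch of the same word-at-a-time
// idea (hypothetical code, not what is assembled below: the helper
// first_zero_byte() stands in for czx1.r, little-endian byte order is
// assumed, and all the speculation/recovery machinery is omitted):
//
//	#include <stddef.h>
//	#include <stdint.h>
//
//	/* index (0..7) of the least significant zero byte in w,
//	 * or 8 if there is none -- what czx1.r computes */
//	static unsigned first_zero_byte(uint64_t w)
//	{
//		unsigned i;
//
//		for (i = 0; i < 8; i++)
//			if (((w >> (8 * i)) & 0xff) == 0)
//				return i;
//		return 8;
//	}
//
//	size_t strlen_sketch(const char *s)
//	{
//		uintptr_t misalign = (uintptr_t)s & 7;
//		const uint64_t *p = (const uint64_t *)((uintptr_t)s - misalign);
//		/* force the bytes preceding the string to 0xff so a
//		 * stray zero there cannot end the search early */
//		uint64_t w = *p++ | ~(~0ULL << (8 * misalign));
//		unsigned idx;
//
//		while ((idx = first_zero_byte(w)) == 8)
//			w = *p++;
//		/* p is now one word past the terminator's word */
//		return (size_t)((const char *)p - s) - (8 - idx);
//	}
//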
#define saved_pfs	r11
#define tmp		r10
#define base		r16
#define orig		r17
#define saved_pr	r18
#define src		r19
#define mask		r20
#define val		r21
#define val1		r22
#define val2		r23

GLOBAL_ENTRY(strlen)
	.prologue
	.save ar.pfs, saved_pfs
	alloc saved_pfs=ar.pfs,11,0,0,8	// rotating must be multiple of 8

	.rotr v[2], w[2]	// declares our 4 aliases

	extr.u tmp=in0,0,3	// tmp=least significant 3 bits
	mov orig=in0		// keep track of initial byte address
	dep src=0,in0,0,3	// src=8-byte-aligned in0 address
	.save pr, saved_pr
	mov saved_pr=pr		// preserve predicates (rotation)
	;;

	.body

	ld8 v[1]=[src],8	// must not speculate: can fail here
	shl tmp=tmp,3		// multiply by 8 bits/byte
	mov mask=-1		// our mask
	;;
	ld8.s w[1]=[src],8	// speculatively load next
	cmp.eq p6,p0=r0,r0	// sets p6 to true for cmp.and
	sub tmp=64,tmp		// how many bits to shift our mask to the right
	;;
	shr.u mask=mask,tmp	// 0xff in each byte that precedes the string
	mov ar.ec=r0		// clear epilogue counter (saved in ar.pfs)
	;;
	add base=-16,src	// keep track of aligned base
	or v[1]=v[1],mask	// now we have a safe initial byte pattern
	;;
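	// Worked example (hypothetical address): if in0 ends in 0x..5,
	// tmp = 5*8 = 40, then tmp = 64-40 = 24, and
	// mask = 0xffffffffffffffff >> 24 = 0x000000ffffffffff.
	// On this little-endian load the five low-order bytes of v[1]
	// are the bytes before the string start; or-ing in mask forces
	// them to 0xff so czx1.r cannot match them.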
1:
	ld8.s v[0]=[src],8	// speculatively load next
	czx1.r val1=v[1]	// search 0 byte from right
	czx1.r val2=w[1]	// search 0 byte from right in following 8 bytes
	;;
	ld8.s w[0]=[src],8	// speculatively load next to next
	cmp.eq.and p6,p0=8,val1	// p6 = p6 and val1==8
	cmp.eq.and p6,p0=8,val2	// p6 = p6 and val2==8
(p6)	br.wtop.dptk 1b		// loop until p6 == 0
	;;
	//
	// We must try the recovery code iff
	// val1_is_nat || (val1==8 && val2_is_nat)
	//
	// XXX Fixme
	//	- there must be a better way of doing the test
	//
	cmp.eq p8,p9=8,val1	// p8 = no zero in v[1], p9 = zero in v[1] (disambiguate)
	tnat.nz p6,p7=val1	// test NaT on val1
(p6)	br.cond.spnt .recover	// jump to recovery if val1 is NaT
	;;
	//
	// if we come here p7 is true, i.e., initialized for cmp
	//
	cmp.eq.and p7,p0=8,val1	// val1==8?
	tnat.nz.and p7,p0=val2	// test NaT on val2
(p7)	br.cond.spnt .recover	// jump to recovery if val2 is NaT
	;;
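	//
	// When the loop falls through, the two speculative loads of the
	// last iteration have already advanced src: it points 3 words
	// past the word holding the terminator when the zero was in w[1]
	// (p8 case), and 4 words past when it was in v[1] (p9 case).
	// The fixups below move src to just past the terminator's word;
	// the length is then (src-orig) - (8-val1). Worked example with
	// hypothetical addresses: orig=0x1003, terminator at 0x1009:
	// val1=1, corrected src=0x1010, length = 13 - 7 = 6.
	//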
(p8)	mov val1=val2		// the other test got us out of the loop
(p8)	adds src=-16,src	// correct position when 3 ahead
(p9)	adds src=-24,src	// correct position when 4 ahead
	;;
	sub ret0=src,orig	// distance from start of string
	sub tmp=8,val1		// which byte in word
	mov pr=saved_pr,0xffffffffffff0000
	;;
	sub ret0=ret0,tmp	// adjust
	mov ar.pfs=saved_pfs	// because of ar.ec, restore no matter what
	br.ret.sptk.many rp	// end of normal execution

	//
	// Outlined recovery code when speculation failed
	//
	// This time we don't use speculation and rely on the normal exception
	// mechanism. That's why the loop is not as good as the previous one:
	// read ahead is not possible.
	//
	// IMPORTANT:
	// Please note that in the case of strlen() as opposed to strlen_user()
	// we don't use the exception mechanism, as this function is not
	// supposed to fail. If that happens it means we have a bug and the
	// code will cause a kernel fault.
	//
	// XXX Fixme
	//	- today we restart from the beginning of the string instead
	//	  of trying to continue where we left off.
	//
.recover:
	ld8 val=[base],8	// will fail if unrecoverable fault
	;;
	or val=val,mask		// remask first bytes
	cmp.eq p0,p6=r0,r0	// nullify first ld8 in loop
	;;
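	// val already holds the (re-masked) first word, so with p6
	// cleared the first pass through the loop below skips the ld8
	// and tests val directly; later passes reload because br.wtop
	// was taken with p6 == 1.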
	//
	// ar.ec is still zero here
	//
2:
(p6)	ld8 val=[base],8	// will fail if unrecoverable fault
	;;
	czx1.r val1=val		// search 0 byte from right
	;;
	cmp.eq p6,p0=8,val1	// val1==8 ?
(p6)	br.wtop.dptk 2b		// loop until p6 == 0
	;;			// (avoid WAW on p63)
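	// like src in the normal path, base now points one word past
	// the word holding the terminator, so the same (8 - val1)
	// adjustment yields the length.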
	sub ret0=base,orig	// distance from start of string
	sub tmp=8,val1		// which byte in word
	mov pr=saved_pr,0xffffffffffff0000
	;;
	sub ret0=ret0,tmp	// adjust for bytes past the terminator
	mov ar.pfs=saved_pfs	// because of ar.ec, restore no matter what
	br.ret.sptk.many rp	// end of successful recovery code
END(strlen)