Greg Kroah-Hartman | b244131 | 2017-11-01 15:07:57 +0100 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0 |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2 | /* calibrate.c: default delay calibration |
| 3 | * |
| 4 | * Excised from init/main.c |
| 5 | * Copyright (C) 1991, 1992 Linus Torvalds |
| 6 | */ |
| 7 | |
Tim Schmielau | cd354f1 | 2007-02-14 00:33:14 -0800 | [diff] [blame] | 8 | #include <linux/jiffies.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9 | #include <linux/delay.h> |
| 10 | #include <linux/init.h> |
Andrew Morton | 941e492 | 2008-02-06 01:36:42 -0800 | [diff] [blame] | 11 | #include <linux/timex.h> |
Alok Kataria | 3da757d | 2008-06-20 15:06:33 -0700 | [diff] [blame] | 12 | #include <linux/smp.h> |
Sameer Nanda | 7afe184 | 2011-07-25 17:13:29 -0700 | [diff] [blame] | 13 | #include <linux/percpu.h> |
Venkatesh Pallipadi | 8a9e1b0 | 2005-06-23 00:08:13 -0700 | [diff] [blame] | 14 | |
Alok Kataria | f3f3149 | 2008-06-23 18:21:56 -0700 | [diff] [blame] | 15 | unsigned long lpj_fine; |
Randy Dunlap | bfe8df3 | 2007-10-16 01:23:46 -0700 | [diff] [blame] | 16 | unsigned long preset_lpj; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 17 | static int __init lpj_setup(char *str) |
| 18 | { |
| 19 | preset_lpj = simple_strtoul(str,NULL,0); |
| 20 | return 1; |
| 21 | } |
| 22 | |
| 23 | __setup("lpj=", lpj_setup); |
| 24 | |
Venkatesh Pallipadi | 8a9e1b0 | 2005-06-23 00:08:13 -0700 | [diff] [blame] | 25 | #ifdef ARCH_HAS_READ_CURRENT_TIMER |
| 26 | |
| 27 | /* This routine uses the read_current_timer() routine and gets the |
| 28 | * loops per jiffy directly, instead of guessing it using delay(). |
| 29 | * Also, this code tries to handle non-maskable asynchronous events |
| 30 | * (like SMIs) |
| 31 | */ |
| 32 | #define DELAY_CALIBRATION_TICKS ((HZ < 100) ? 1 : (HZ/100)) |
| 33 | #define MAX_DIRECT_CALIBRATION_RETRIES 5 |
| 34 | |
Paul Gortmaker | 0db0628 | 2013-06-19 14:53:51 -0400 | [diff] [blame] | 35 | static unsigned long calibrate_delay_direct(void) |
Venkatesh Pallipadi | 8a9e1b0 | 2005-06-23 00:08:13 -0700 | [diff] [blame] | 36 | { |
| 37 | unsigned long pre_start, start, post_start; |
| 38 | unsigned long pre_end, end, post_end; |
| 39 | unsigned long start_jiffies; |
Alok Kataria | f3f3149 | 2008-06-23 18:21:56 -0700 | [diff] [blame] | 40 | unsigned long timer_rate_min, timer_rate_max; |
| 41 | unsigned long good_timer_sum = 0; |
| 42 | unsigned long good_timer_count = 0; |
Andrew Worsley | d2b4631 | 2011-05-24 17:13:15 -0700 | [diff] [blame] | 43 | unsigned long measured_times[MAX_DIRECT_CALIBRATION_RETRIES]; |
| 44 | int max = -1; /* index of measured_times with max/min values or not set */ |
| 45 | int min = -1; |
Venkatesh Pallipadi | 8a9e1b0 | 2005-06-23 00:08:13 -0700 | [diff] [blame] | 46 | int i; |
| 47 | |
| 48 | if (read_current_timer(&pre_start) < 0 ) |
| 49 | return 0; |
| 50 | |
| 51 | /* |
| 52 | * A simple loop like |
| 53 | * while ( jiffies < start_jiffies+1) |
| 54 | * start = read_current_timer(); |
| 55 | * will not do. As we don't really know whether jiffy switch |
| 56 | * happened first or timer_value was read first. And some asynchronous |
| 57 | * event can happen between these two events introducing errors in lpj. |
| 58 | * |
| 59 | * So, we do |
| 60 | * 1. pre_start <- When we are sure that jiffy switch hasn't happened |
| 61 | * 2. check jiffy switch |
| 62 | * 3. start <- timer value before or after jiffy switch |
| 63 | * 4. post_start <- When we are sure that jiffy switch has happened |
| 64 | * |
| 65 | * Note, we don't know anything about order of 2 and 3. |
| 66 | * Now, by looking at post_start and pre_start difference, we can |
| 67 | * check whether any asynchronous event happened or not |
| 68 | */ |
| 69 | |
| 70 | for (i = 0; i < MAX_DIRECT_CALIBRATION_RETRIES; i++) { |
| 71 | pre_start = 0; |
| 72 | read_current_timer(&start); |
| 73 | start_jiffies = jiffies; |
Tim Deegan | 70a0622 | 2011-02-10 08:50:41 +0000 | [diff] [blame] | 74 | while (time_before_eq(jiffies, start_jiffies + 1)) { |
Venkatesh Pallipadi | 8a9e1b0 | 2005-06-23 00:08:13 -0700 | [diff] [blame] | 75 | pre_start = start; |
| 76 | read_current_timer(&start); |
| 77 | } |
| 78 | read_current_timer(&post_start); |
| 79 | |
| 80 | pre_end = 0; |
| 81 | end = post_start; |
Tim Deegan | 70a0622 | 2011-02-10 08:50:41 +0000 | [diff] [blame] | 82 | while (time_before_eq(jiffies, start_jiffies + 1 + |
| 83 | DELAY_CALIBRATION_TICKS)) { |
Venkatesh Pallipadi | 8a9e1b0 | 2005-06-23 00:08:13 -0700 | [diff] [blame] | 84 | pre_end = end; |
| 85 | read_current_timer(&end); |
| 86 | } |
| 87 | read_current_timer(&post_end); |
| 88 | |
Alok Kataria | f3f3149 | 2008-06-23 18:21:56 -0700 | [diff] [blame] | 89 | timer_rate_max = (post_end - pre_start) / |
| 90 | DELAY_CALIBRATION_TICKS; |
| 91 | timer_rate_min = (pre_end - post_start) / |
| 92 | DELAY_CALIBRATION_TICKS; |
Venkatesh Pallipadi | 8a9e1b0 | 2005-06-23 00:08:13 -0700 | [diff] [blame] | 93 | |
| 94 | /* |
Alok Kataria | f3f3149 | 2008-06-23 18:21:56 -0700 | [diff] [blame] | 95 | * If the upper limit and lower limit of the timer_rate is |
Venkatesh Pallipadi | 8a9e1b0 | 2005-06-23 00:08:13 -0700 | [diff] [blame] | 96 | * >= 12.5% apart, redo calibration. |
| 97 | */ |
Andrew Worsley | d2b4631 | 2011-05-24 17:13:15 -0700 | [diff] [blame] | 98 | if (start >= post_end) |
| 99 | printk(KERN_NOTICE "calibrate_delay_direct() ignoring " |
| 100 | "timer_rate as we had a TSC wrap around" |
| 101 | " start=%lu >=post_end=%lu\n", |
| 102 | start, post_end); |
| 103 | if (start < post_end && pre_start != 0 && pre_end != 0 && |
Alok Kataria | f3f3149 | 2008-06-23 18:21:56 -0700 | [diff] [blame] | 104 | (timer_rate_max - timer_rate_min) < (timer_rate_max >> 3)) { |
| 105 | good_timer_count++; |
| 106 | good_timer_sum += timer_rate_max; |
Andrew Worsley | d2b4631 | 2011-05-24 17:13:15 -0700 | [diff] [blame] | 107 | measured_times[i] = timer_rate_max; |
| 108 | if (max < 0 || timer_rate_max > measured_times[max]) |
| 109 | max = i; |
| 110 | if (min < 0 || timer_rate_max < measured_times[min]) |
| 111 | min = i; |
| 112 | } else |
| 113 | measured_times[i] = 0; |
| 114 | |
Venkatesh Pallipadi | 8a9e1b0 | 2005-06-23 00:08:13 -0700 | [diff] [blame] | 115 | } |
| 116 | |
Andrew Worsley | d2b4631 | 2011-05-24 17:13:15 -0700 | [diff] [blame] | 117 | /* |
| 118 | * Find the maximum & minimum - if they differ too much throw out the |
| 119 | * one with the largest difference from the mean and try again... |
| 120 | */ |
| 121 | while (good_timer_count > 1) { |
| 122 | unsigned long estimate; |
| 123 | unsigned long maxdiff; |
Venkatesh Pallipadi | 8a9e1b0 | 2005-06-23 00:08:13 -0700 | [diff] [blame] | 124 | |
Andrew Worsley | d2b4631 | 2011-05-24 17:13:15 -0700 | [diff] [blame] | 125 | /* compute the estimate */ |
| 126 | estimate = (good_timer_sum/good_timer_count); |
| 127 | maxdiff = estimate >> 3; |
| 128 | |
| 129 | /* if range is within 12% let's take it */ |
| 130 | if ((measured_times[max] - measured_times[min]) < maxdiff) |
| 131 | return estimate; |
| 132 | |
| 133 | /* ok - drop the worse value and try again... */ |
| 134 | good_timer_sum = 0; |
| 135 | good_timer_count = 0; |
| 136 | if ((measured_times[max] - estimate) < |
| 137 | (estimate - measured_times[min])) { |
| 138 | printk(KERN_NOTICE "calibrate_delay_direct() dropping " |
| 139 | "min bogoMips estimate %d = %lu\n", |
| 140 | min, measured_times[min]); |
| 141 | measured_times[min] = 0; |
| 142 | min = max; |
| 143 | } else { |
| 144 | printk(KERN_NOTICE "calibrate_delay_direct() dropping " |
| 145 | "max bogoMips estimate %d = %lu\n", |
| 146 | max, measured_times[max]); |
| 147 | measured_times[max] = 0; |
| 148 | max = min; |
| 149 | } |
| 150 | |
| 151 | for (i = 0; i < MAX_DIRECT_CALIBRATION_RETRIES; i++) { |
| 152 | if (measured_times[i] == 0) |
| 153 | continue; |
| 154 | good_timer_count++; |
| 155 | good_timer_sum += measured_times[i]; |
| 156 | if (measured_times[i] < measured_times[min]) |
| 157 | min = i; |
| 158 | if (measured_times[i] > measured_times[max]) |
| 159 | max = i; |
| 160 | } |
| 161 | |
| 162 | } |
| 163 | |
| 164 | printk(KERN_NOTICE "calibrate_delay_direct() failed to get a good " |
| 165 | "estimate for loops_per_jiffy.\nProbably due to long platform " |
| 166 | "interrupts. Consider using \"lpj=\" boot option.\n"); |
Venkatesh Pallipadi | 8a9e1b0 | 2005-06-23 00:08:13 -0700 | [diff] [blame] | 167 | return 0; |
| 168 | } |
#else
/*
 * No ARCH_HAS_READ_CURRENT_TIMER: report failure so calibrate_delay()
 * falls back to the software delay-loop calibration.
 */
static unsigned long calibrate_delay_direct(void)
{
	return 0;
}
#endif
| 175 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 176 | /* |
| 177 | * This is the number of bits of precision for the loops_per_jiffy. Each |
Phil Carmody | 191e568 | 2011-03-22 16:34:13 -0700 | [diff] [blame] | 178 | * time we refine our estimate after the first takes 1.5/HZ seconds, so try |
| 179 | * to start with a good estimate. |
Alok Kataria | 3da757d | 2008-06-20 15:06:33 -0700 | [diff] [blame] | 180 | * For the boot cpu we can skip the delay calibration and assign it a value |
Alok Kataria | f3f3149 | 2008-06-23 18:21:56 -0700 | [diff] [blame] | 181 | * calculated based on the timer frequency. |
| 182 | * For the rest of the CPUs we cannot assume that the timer frequency is same as |
Alok Kataria | 3da757d | 2008-06-20 15:06:33 -0700 | [diff] [blame] | 183 | * the cpu frequency, hence do the calibration for those. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 184 | */ |
| 185 | #define LPS_PREC 8 |
| 186 | |
Paul Gortmaker | 0db0628 | 2013-06-19 14:53:51 -0400 | [diff] [blame] | 187 | static unsigned long calibrate_delay_converge(void) |
Phil Carmody | 71c696b | 2011-03-22 16:34:12 -0700 | [diff] [blame] | 188 | { |
Phil Carmody | 191e568 | 2011-03-22 16:34:13 -0700 | [diff] [blame] | 189 | /* First stage - slowly accelerate to find initial bounds */ |
Phil Carmody | b1b5f65 | 2011-03-22 16:34:15 -0700 | [diff] [blame] | 190 | unsigned long lpj, lpj_base, ticks, loopadd, loopadd_base, chop_limit; |
Phil Carmody | 191e568 | 2011-03-22 16:34:13 -0700 | [diff] [blame] | 191 | int trials = 0, band = 0, trial_in_band = 0; |
Phil Carmody | 71c696b | 2011-03-22 16:34:12 -0700 | [diff] [blame] | 192 | |
| 193 | lpj = (1<<12); |
Phil Carmody | 191e568 | 2011-03-22 16:34:13 -0700 | [diff] [blame] | 194 | |
| 195 | /* wait for "start of" clock tick */ |
| 196 | ticks = jiffies; |
| 197 | while (ticks == jiffies) |
| 198 | ; /* nothing */ |
| 199 | /* Go .. */ |
| 200 | ticks = jiffies; |
| 201 | do { |
| 202 | if (++trial_in_band == (1<<band)) { |
| 203 | ++band; |
| 204 | trial_in_band = 0; |
| 205 | } |
| 206 | __delay(lpj * band); |
| 207 | trials += band; |
| 208 | } while (ticks == jiffies); |
| 209 | /* |
| 210 | * We overshot, so retreat to a clear underestimate. Then estimate |
| 211 | * the largest likely undershoot. This defines our chop bounds. |
| 212 | */ |
| 213 | trials -= band; |
Phil Carmody | b1b5f65 | 2011-03-22 16:34:15 -0700 | [diff] [blame] | 214 | loopadd_base = lpj * band; |
| 215 | lpj_base = lpj * trials; |
| 216 | |
| 217 | recalibrate: |
| 218 | lpj = lpj_base; |
| 219 | loopadd = loopadd_base; |
Phil Carmody | 71c696b | 2011-03-22 16:34:12 -0700 | [diff] [blame] | 220 | |
| 221 | /* |
| 222 | * Do a binary approximation to get lpj set to |
Phil Carmody | 191e568 | 2011-03-22 16:34:13 -0700 | [diff] [blame] | 223 | * equal one clock (up to LPS_PREC bits) |
Phil Carmody | 71c696b | 2011-03-22 16:34:12 -0700 | [diff] [blame] | 224 | */ |
Phil Carmody | b1b5f65 | 2011-03-22 16:34:15 -0700 | [diff] [blame] | 225 | chop_limit = lpj >> LPS_PREC; |
Phil Carmody | 191e568 | 2011-03-22 16:34:13 -0700 | [diff] [blame] | 226 | while (loopadd > chop_limit) { |
| 227 | lpj += loopadd; |
Phil Carmody | 71c696b | 2011-03-22 16:34:12 -0700 | [diff] [blame] | 228 | ticks = jiffies; |
| 229 | while (ticks == jiffies) |
Phil Carmody | 191e568 | 2011-03-22 16:34:13 -0700 | [diff] [blame] | 230 | ; /* nothing */ |
Phil Carmody | 71c696b | 2011-03-22 16:34:12 -0700 | [diff] [blame] | 231 | ticks = jiffies; |
| 232 | __delay(lpj); |
| 233 | if (jiffies != ticks) /* longer than 1 tick */ |
Phil Carmody | 191e568 | 2011-03-22 16:34:13 -0700 | [diff] [blame] | 234 | lpj -= loopadd; |
| 235 | loopadd >>= 1; |
Phil Carmody | 71c696b | 2011-03-22 16:34:12 -0700 | [diff] [blame] | 236 | } |
Phil Carmody | b1b5f65 | 2011-03-22 16:34:15 -0700 | [diff] [blame] | 237 | /* |
| 238 | * If we incremented every single time possible, presume we've |
| 239 | * massively underestimated initially, and retry with a higher |
| 240 | * start, and larger range. (Only seen on x86_64, due to SMIs) |
| 241 | */ |
| 242 | if (lpj + loopadd * 2 == lpj_base + loopadd_base * 2) { |
| 243 | lpj_base = lpj; |
| 244 | loopadd_base <<= 2; |
| 245 | goto recalibrate; |
| 246 | } |
Phil Carmody | 71c696b | 2011-03-22 16:34:12 -0700 | [diff] [blame] | 247 | |
| 248 | return lpj; |
| 249 | } |
| 250 | |
Sameer Nanda | 7afe184 | 2011-07-25 17:13:29 -0700 | [diff] [blame] | 251 | static DEFINE_PER_CPU(unsigned long, cpu_loops_per_jiffy) = { 0 }; |
| 252 | |
Jack Steiner | b565201 | 2011-11-15 15:33:56 -0800 | [diff] [blame] | 253 | /* |
| 254 | * Check if cpu calibration delay is already known. For example, |
| 255 | * some processors with multi-core sockets may have all cores |
| 256 | * with the same calibration delay. |
| 257 | * |
| 258 | * Architectures should override this function if a faster calibration |
| 259 | * method is available. |
| 260 | */ |
Paul Gortmaker | 0db0628 | 2013-06-19 14:53:51 -0400 | [diff] [blame] | 261 | unsigned long __attribute__((weak)) calibrate_delay_is_known(void) |
Jack Steiner | b565201 | 2011-11-15 15:33:56 -0800 | [diff] [blame] | 262 | { |
| 263 | return 0; |
| 264 | } |
| 265 | |
Peter De Schrijver | e663911 | 2014-06-12 18:58:27 +0300 | [diff] [blame] | 266 | /* |
| 267 | * Indicate the cpu delay calibration is done. This can be used by |
| 268 | * architectures to stop accepting delay timer registrations after this point. |
| 269 | */ |
| 270 | |
| 271 | void __attribute__((weak)) calibration_delay_done(void) |
| 272 | { |
| 273 | } |
| 274 | |
Paul Gortmaker | 0db0628 | 2013-06-19 14:53:51 -0400 | [diff] [blame] | 275 | void calibrate_delay(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 276 | { |
Russell King | 1b19ca9 | 2011-06-22 11:55:50 +0100 | [diff] [blame] | 277 | unsigned long lpj; |
Mike Travis | feae320 | 2009-11-17 18:22:13 -0600 | [diff] [blame] | 278 | static bool printed; |
Sameer Nanda | 7afe184 | 2011-07-25 17:13:29 -0700 | [diff] [blame] | 279 | int this_cpu = smp_processor_id(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 280 | |
Sameer Nanda | 7afe184 | 2011-07-25 17:13:29 -0700 | [diff] [blame] | 281 | if (per_cpu(cpu_loops_per_jiffy, this_cpu)) { |
| 282 | lpj = per_cpu(cpu_loops_per_jiffy, this_cpu); |
Diwakar Tundlam | 8595c53 | 2012-03-23 15:02:28 -0700 | [diff] [blame] | 283 | if (!printed) |
| 284 | pr_info("Calibrating delay loop (skipped) " |
Sameer Nanda | 7afe184 | 2011-07-25 17:13:29 -0700 | [diff] [blame] | 285 | "already calibrated this CPU"); |
| 286 | } else if (preset_lpj) { |
Russell King | 1b19ca9 | 2011-06-22 11:55:50 +0100 | [diff] [blame] | 287 | lpj = preset_lpj; |
Mike Travis | feae320 | 2009-11-17 18:22:13 -0600 | [diff] [blame] | 288 | if (!printed) |
| 289 | pr_info("Calibrating delay loop (skipped) " |
| 290 | "preset value.. "); |
| 291 | } else if ((!printed) && lpj_fine) { |
Russell King | 1b19ca9 | 2011-06-22 11:55:50 +0100 | [diff] [blame] | 292 | lpj = lpj_fine; |
Mike Travis | feae320 | 2009-11-17 18:22:13 -0600 | [diff] [blame] | 293 | pr_info("Calibrating delay loop (skipped), " |
Alok Kataria | f3f3149 | 2008-06-23 18:21:56 -0700 | [diff] [blame] | 294 | "value calculated using timer frequency.. "); |
Jack Steiner | b565201 | 2011-11-15 15:33:56 -0800 | [diff] [blame] | 295 | } else if ((lpj = calibrate_delay_is_known())) { |
| 296 | ; |
Russell King | 1b19ca9 | 2011-06-22 11:55:50 +0100 | [diff] [blame] | 297 | } else if ((lpj = calibrate_delay_direct()) != 0) { |
Mike Travis | feae320 | 2009-11-17 18:22:13 -0600 | [diff] [blame] | 298 | if (!printed) |
| 299 | pr_info("Calibrating delay using timer " |
| 300 | "specific routine.. "); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 301 | } else { |
Mike Travis | feae320 | 2009-11-17 18:22:13 -0600 | [diff] [blame] | 302 | if (!printed) |
| 303 | pr_info("Calibrating delay loop... "); |
Russell King | 1b19ca9 | 2011-06-22 11:55:50 +0100 | [diff] [blame] | 304 | lpj = calibrate_delay_converge(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 305 | } |
Sameer Nanda | 7afe184 | 2011-07-25 17:13:29 -0700 | [diff] [blame] | 306 | per_cpu(cpu_loops_per_jiffy, this_cpu) = lpj; |
Mike Travis | feae320 | 2009-11-17 18:22:13 -0600 | [diff] [blame] | 307 | if (!printed) |
| 308 | pr_cont("%lu.%02lu BogoMIPS (lpj=%lu)\n", |
Russell King | 1b19ca9 | 2011-06-22 11:55:50 +0100 | [diff] [blame] | 309 | lpj/(500000/HZ), |
| 310 | (lpj/(5000/HZ)) % 100, lpj); |
Mike Travis | feae320 | 2009-11-17 18:22:13 -0600 | [diff] [blame] | 311 | |
Russell King | 1b19ca9 | 2011-06-22 11:55:50 +0100 | [diff] [blame] | 312 | loops_per_jiffy = lpj; |
Mike Travis | feae320 | 2009-11-17 18:22:13 -0600 | [diff] [blame] | 313 | printed = true; |
Peter De Schrijver | e663911 | 2014-06-12 18:58:27 +0300 | [diff] [blame] | 314 | |
| 315 | calibration_delay_done(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 316 | } |