/* calibrate.c: default delay calibration
 *
 * Excised from init/main.c
 * Copyright (C) 1991, 1992 Linus Torvalds
 */

#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/timex.h>
#include <linux/smp.h>
#include <linux/percpu.h>

unsigned long lpj_fine;
unsigned long preset_lpj;
static int __init lpj_setup(char *str)
{
        preset_lpj = simple_strtoul(str, NULL, 0);
        return 1;
}

__setup("lpj=", lpj_setup);
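
/*
 * Example (hypothetical value): booting with "lpj=8000000" stores 8000000 in
 * preset_lpj, and calibrate_delay() below then skips the measurement and uses
 * that value for loops_per_jiffy.
 */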

#ifdef ARCH_HAS_READ_CURRENT_TIMER

/* This routine uses the read_current_timer() routine and gets the
 * loops per jiffy directly, instead of guessing it using delay().
 * Also, this code tries to handle non-maskable asynchronous events
 * (like SMIs)
 */
#define DELAY_CALIBRATION_TICKS			((HZ < 100) ? 1 : (HZ/100))
#define MAX_DIRECT_CALIBRATION_RETRIES		5
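
/*
 * With the values above, each measurement window is HZ/100 ticks (roughly
 * 10 ms at common HZ values), but never less than one full tick, and at most
 * five such measurements are attempted before the direct calibration gives up.
 */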

static unsigned long calibrate_delay_direct(void)
{
        unsigned long pre_start, start, post_start;
        unsigned long pre_end, end, post_end;
        unsigned long start_jiffies;
        unsigned long timer_rate_min, timer_rate_max;
        unsigned long good_timer_sum = 0;
        unsigned long good_timer_count = 0;
        unsigned long measured_times[MAX_DIRECT_CALIBRATION_RETRIES];
        int max = -1; /* index of measured_times with max/min values or not set */
        int min = -1;
        int i;

        if (read_current_timer(&pre_start) < 0)
                return 0;

        /*
         * A simple loop like
         *      while ( jiffies < start_jiffies+1)
         *              start = read_current_timer();
         * will not do, because we don't really know whether the jiffy switch
         * happened first or the timer value was read first, and some
         * asynchronous event can happen between those two events, introducing
         * errors in lpj.
         *
         * So, we do
         * 1. pre_start <- When we are sure that the jiffy switch hasn't happened
         * 2. check jiffy switch
         * 3. start <- timer value before or after jiffy switch
         * 4. post_start <- When we are sure that the jiffy switch has happened
         *
         * Note, we don't know anything about the order of 2 and 3.
         * Now, by looking at the difference between post_start and pre_start,
         * we can check whether any asynchronous event happened or not.
         */
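
        /*
         * Illustration with hypothetical numbers (not taken from real
         * hardware): with a timer counting at 2 GHz and HZ == 250, one jiffy
         * is 8,000,000 counts, so a clean measurement should yield
         * timer_rate_max and timer_rate_min within 12.5% of each other around
         * that value.  An asynchronous event (e.g. an SMI) landing inside the
         * window inflates the spread and causes the sample to be rejected
         * below.
         */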

        for (i = 0; i < MAX_DIRECT_CALIBRATION_RETRIES; i++) {
                pre_start = 0;
                read_current_timer(&start);
                start_jiffies = jiffies;
                while (time_before_eq(jiffies, start_jiffies + 1)) {
                        pre_start = start;
                        read_current_timer(&start);
                }
                read_current_timer(&post_start);

                pre_end = 0;
                end = post_start;
                while (time_before_eq(jiffies, start_jiffies + 1 +
                                               DELAY_CALIBRATION_TICKS)) {
                        pre_end = end;
                        read_current_timer(&end);
                }
                read_current_timer(&post_end);

                timer_rate_max = (post_end - pre_start) /
                                        DELAY_CALIBRATION_TICKS;
                timer_rate_min = (pre_end - post_start) /
                                        DELAY_CALIBRATION_TICKS;

                /*
                 * If the upper and lower limits of the timer_rate are
                 * >= 12.5% apart, discard this sample and let the loop
                 * try again.
                 */
                if (start >= post_end)
                        printk(KERN_NOTICE "calibrate_delay_direct() ignoring "
                                        "timer_rate as we had a TSC wrap around"
                                        " start=%lu >=post_end=%lu\n",
                                start, post_end);
                if (start < post_end && pre_start != 0 && pre_end != 0 &&
                    (timer_rate_max - timer_rate_min) < (timer_rate_max >> 3)) {
                        good_timer_count++;
                        good_timer_sum += timer_rate_max;
                        measured_times[i] = timer_rate_max;
                        if (max < 0 || timer_rate_max > measured_times[max])
                                max = i;
                        if (min < 0 || timer_rate_max < measured_times[min])
                                min = i;
                } else
                        measured_times[i] = 0;

        }

        /*
         * Find the maximum & minimum - if they differ too much throw out the
         * one with the largest difference from the mean and try again...
         */
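
        /*
         * Worked example with hypothetical values: suppose three good samples
         * of 8.0M, 8.1M and 9.6M counts per tick survived the loop above.
         * The mean is ~8.57M, so maxdiff is ~1.07M; the 1.6M spread exceeds
         * that, and 9.6M (farther from the mean than 8.0M is) gets dropped.
         * The remaining pair agrees to within 12.5%, so ~8.05M is returned.
         */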
        while (good_timer_count > 1) {
                unsigned long estimate;
                unsigned long maxdiff;

                /* compute the estimate */
                estimate = (good_timer_sum/good_timer_count);
                maxdiff = estimate >> 3;

                /* if the range is within 12.5% let's take it */
                if ((measured_times[max] - measured_times[min]) < maxdiff)
                        return estimate;

                /* ok - drop the worst value and try again... */
                good_timer_sum = 0;
                good_timer_count = 0;
                if ((measured_times[max] - estimate) <
                                (estimate - measured_times[min])) {
                        printk(KERN_NOTICE "calibrate_delay_direct() dropping "
                                        "min bogoMips estimate %d = %lu\n",
                                min, measured_times[min]);
                        measured_times[min] = 0;
                        min = max;
                } else {
                        printk(KERN_NOTICE "calibrate_delay_direct() dropping "
                                        "max bogoMips estimate %d = %lu\n",
                                max, measured_times[max]);
                        measured_times[max] = 0;
                        max = min;
                }

                for (i = 0; i < MAX_DIRECT_CALIBRATION_RETRIES; i++) {
                        if (measured_times[i] == 0)
                                continue;
                        good_timer_count++;
                        good_timer_sum += measured_times[i];
                        if (measured_times[i] < measured_times[min])
                                min = i;
                        if (measured_times[i] > measured_times[max])
                                max = i;
                }

        }

        printk(KERN_NOTICE "calibrate_delay_direct() failed to get a good "
               "estimate for loops_per_jiffy.\nProbably due to long platform "
               "interrupts. Consider using \"lpj=\" boot option.\n");
        return 0;
}
#else
static unsigned long calibrate_delay_direct(void)
{
        return 0;
}
#endif

/*
 * This is the number of bits of precision for the loops_per_jiffy. Each
 * refinement after the first takes 1.5/HZ seconds, so try to start with a
 * good estimate.
 * For the boot cpu we can skip the delay calibration and assign it a value
 * calculated based on the timer frequency.
 * For the rest of the CPUs we cannot assume that the timer frequency is the
 * same as the cpu frequency, hence we do the calibration for those.
 */
#define LPS_PREC 8
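
/*
 * With LPS_PREC == 8, the binary chop in calibrate_delay_converge() below
 * keeps halving its step until the step (loopadd) falls to lpj/256 or less,
 * so the result is resolved to roughly 0.4%.
 */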

static unsigned long calibrate_delay_converge(void)
{
        /* First stage - slowly accelerate to find initial bounds */
        unsigned long lpj, lpj_base, ticks, loopadd, loopadd_base, chop_limit;
        int trials = 0, band = 0, trial_in_band = 0;

        lpj = (1<<12);

        /* wait for "start of" clock tick */
        ticks = jiffies;
        while (ticks == jiffies)
                ; /* nothing */
        /* Go .. */
        ticks = jiffies;
        do {
                if (++trial_in_band == (1<<band)) {
                        ++band;
                        trial_in_band = 0;
                }
                __delay(lpj * band);
                trials += band;
        } while (ticks == jiffies);
        /*
         * We overshot, so retreat to a clear underestimate. Then estimate
         * the largest likely undershoot. This defines our chop bounds.
         */
        trials -= band;
        loopadd_base = lpj * band;
        lpj_base = lpj * trials;
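
        /*
         * Hypothetical walk-through of the first stage: the delays ramp up as
         * lpj*1, lpj*1, lpj*2, lpj*2, lpj*2, lpj*2, lpj*3, ...  If the tick
         * flips during, say, the second lpj*2 delay, then trials - band == 4,
         * so lpj_base = lpj*4 (the work that fitted inside one tick) and
         * loopadd_base = lpj*2 (the size of the step that overshot).
         */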

recalibrate:
        lpj = lpj_base;
        loopadd = loopadd_base;

        /*
         * Do a binary approximation to get lpj set to
         * equal one clock (up to LPS_PREC bits)
         */
        chop_limit = lpj >> LPS_PREC;
        while (loopadd > chop_limit) {
                lpj += loopadd;
                ticks = jiffies;
                while (ticks == jiffies)
                        ; /* nothing */
                ticks = jiffies;
                __delay(lpj);
                if (jiffies != ticks)   /* longer than 1 tick */
                        lpj -= loopadd;
                loopadd >>= 1;
        }
        /*
         * If we incremented every single time possible, presume we've
         * massively underestimated initially, and retry with a higher
         * start, and larger range. (Only seen on x86_64, due to SMIs)
         */
        if (lpj + loopadd * 2 == lpj_base + loopadd_base * 2) {
                lpj_base = lpj;
                loopadd_base <<= 2;
                goto recalibrate;
        }

        return lpj;
}

static DEFINE_PER_CPU(unsigned long, cpu_loops_per_jiffy) = { 0 };

/*
 * Check if cpu calibration delay is already known. For example,
 * some processors with multi-core sockets may have all cores
 * with the same calibration delay.
 *
 * Architectures should override this function if a faster calibration
 * method is available.
 */
unsigned long __attribute__((weak)) calibrate_delay_is_known(void)
{
        return 0;
}
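
/*
 * As an example of such an override (paraphrased, not copied from any
 * architecture): x86 provides its own calibrate_delay_is_known() that reuses
 * loops_per_jiffy from another CPU in the same package when a constant,
 * always-running TSC makes the value identical across those CPUs.
 */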

/*
 * Indicate the cpu delay calibration is done. This can be used by
 * architectures to stop accepting delay timer registrations after this point.
 */

void __attribute__((weak)) calibration_delay_done(void)
{
}

void calibrate_delay(void)
{
        unsigned long lpj;
        static bool printed;
        int this_cpu = smp_processor_id();

        if (per_cpu(cpu_loops_per_jiffy, this_cpu)) {
                lpj = per_cpu(cpu_loops_per_jiffy, this_cpu);
                if (!printed)
                        pr_info("Calibrating delay loop (skipped) "
                                "already calibrated this CPU");
        } else if (preset_lpj) {
                lpj = preset_lpj;
                if (!printed)
                        pr_info("Calibrating delay loop (skipped) "
                                "preset value.. ");
        } else if ((!printed) && lpj_fine) {
                lpj = lpj_fine;
                pr_info("Calibrating delay loop (skipped), "
                        "value calculated using timer frequency.. ");
        } else if ((lpj = calibrate_delay_is_known())) {
                ;
        } else if ((lpj = calibrate_delay_direct()) != 0) {
                if (!printed)
                        pr_info("Calibrating delay using timer "
                                "specific routine.. ");
        } else {
                if (!printed)
                        pr_info("Calibrating delay loop... ");
                lpj = calibrate_delay_converge();
        }
        per_cpu(cpu_loops_per_jiffy, this_cpu) = lpj;
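
        /*
         * The value printed below is BogoMIPS = lpj * HZ / 500000.  With
         * hypothetical numbers, lpj = 4,000,000 and HZ = 250 give
         * 4000000 / (500000/250) = 2000.00 BogoMIPS.
         */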
        if (!printed)
                pr_cont("%lu.%02lu BogoMIPS (lpj=%lu)\n",
                        lpj/(500000/HZ),
                        (lpj/(5000/HZ)) % 100, lpj);

        loops_per_jiffy = lpj;
        printed = true;

        calibration_delay_done();
}