/*
 * This is a maximally equidistributed combined Tausworthe generator
 * based on code from GNU Scientific Library 1.5 (30 Jun 2004)
 *
 * lfsr113 version:
 *
 * x_n = (s1_n ^ s2_n ^ s3_n ^ s4_n)
 *
 * s1_{n+1} = (((s1_n & 4294967294) << 18) ^ (((s1_n << 6) ^ s1_n) >> 13))
 * s2_{n+1} = (((s2_n & 4294967288) << 2) ^ (((s2_n << 2) ^ s2_n) >> 27))
 * s3_{n+1} = (((s3_n & 4294967280) << 7) ^ (((s3_n << 13) ^ s3_n) >> 21))
 * s4_{n+1} = (((s4_n & 4294967168) << 13) ^ (((s4_n << 3) ^ s4_n) >> 12))
 *
 * The period of this generator is about 2^113 (see erratum paper).
 *
 * From: P. L'Ecuyer, "Maximally Equidistributed Combined Tausworthe
 * Generators", Mathematics of Computation, 65, 213 (1996), 203--213:
 * http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme.ps
 * ftp://ftp.iro.umontreal.ca/pub/simulation/lecuyer/papers/tausme.ps
 *
 * There is an erratum in the paper "Tables of Maximally Equidistributed
 * Combined LFSR Generators", Mathematics of Computation, 68, 225 (1999),
 * 261--269: http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme2.ps
 *
 *      ... the k_j most significant bits of z_j must be non-zero,
 *      for each j. (Note: this restriction also applies to the
 *      computer code given in [4], but was mistakenly not mentioned
 *      in that paper.)
 *
 * This affects the seeding procedure by imposing the requirement
 * s1 > 1, s2 > 7, s3 > 15, s4 > 127.
 */
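
/*
 * Illustrative sketch (disabled): one step of the lfsr113 recurrence above,
 * written out as plain C. It assumes an ordinary 32-bit unsigned type and
 * seeds that already satisfy s1 > 1, s2 > 7, s3 > 15, s4 > 127.
 */
#if 0
static unsigned int taus113_step_example(unsigned int s[4])
{
        s[0] = ((s[0] & 4294967294U) << 18) ^ (((s[0] << 6) ^ s[0]) >> 13);
        s[1] = ((s[1] & 4294967288U) << 2) ^ (((s[1] << 2) ^ s[1]) >> 27);
        s[2] = ((s[2] & 4294967280U) << 7) ^ (((s[2] << 13) ^ s[2]) >> 21);
        s[3] = ((s[3] & 4294967168U) << 13) ^ (((s[3] << 3) ^ s[3]) >> 12);

        return s[0] ^ s[1] ^ s[2] ^ s[3];
}
#endif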

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <asm/unaligned.h>

#ifdef CONFIG_RANDOM32_SELFTEST
static void __init prandom_state_selftest(void);
#else
static inline void prandom_state_selftest(void)
{
}
#endif

static DEFINE_PER_CPU(struct rnd_state, net_rand_state);

/**
 * prandom_u32_state - seeded pseudo-random number generator.
 * @state: pointer to state structure holding seeded state.
 *
 * This is used for pseudo-randomness with no outside seeding.
 * For more random results, use prandom_u32().
 */
u32 prandom_u32_state(struct rnd_state *state)
{
#define TAUSWORTHE(s, a, b, c, d) ((s & c) << d) ^ (((s << a) ^ s) >> b)
        state->s1 = TAUSWORTHE(state->s1, 6U, 13U, 4294967294U, 18U);
        state->s2 = TAUSWORTHE(state->s2, 2U, 27U, 4294967288U, 2U);
        state->s3 = TAUSWORTHE(state->s3, 13U, 21U, 4294967280U, 7U);
        state->s4 = TAUSWORTHE(state->s4, 3U, 12U, 4294967168U, 13U);

        return (state->s1 ^ state->s2 ^ state->s3 ^ state->s4);
}
EXPORT_SYMBOL(prandom_u32_state);
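
/*
 * Minimal usage sketch (disabled), assuming prandom_seed_state() from
 * <linux/random.h> for deterministic seeding: a fixed seed always yields
 * the same stream, which is handy for reproducible tests.
 */
#if 0
static u32 example_reproducible_value(void)
{
        struct rnd_state rnd;

        prandom_seed_state(&rnd, 0x123456789abcdef0ULL);
        return prandom_u32_state(&rnd);
}
#endif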

/**
 * prandom_u32 - pseudo random number generator
 *
 * A 32-bit pseudo-random number is generated using a fast
 * algorithm suitable for simulation. This algorithm is NOT
 * considered safe for cryptographic use.
 */
u32 prandom_u32(void)
{
        struct rnd_state *state = &get_cpu_var(net_rand_state);
        u32 res;

        res = prandom_u32_state(state);
        put_cpu_var(state);

        return res;
}
EXPORT_SYMBOL(prandom_u32);
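
/*
 * Usage sketch (disabled): for a bounded value, prandom_u32_max() (the
 * helper also used by the reseed timer below) scales prandom_u32() into
 * [0, ep_ro), which avoids the modulo bias of prandom_u32() % n.
 */
#if 0
static u32 example_pick_index(u32 nr_entries)
{
        /* pseudo-random index in [0, nr_entries) */
        return prandom_u32_max(nr_entries);
}
#endif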

/**
 * prandom_bytes_state - get the requested number of pseudo-random bytes
 *
 * @state: pointer to state structure holding seeded state.
 * @buf: where to copy the pseudo-random bytes to
 * @bytes: the requested number of bytes
 *
 * This is used for pseudo-randomness with no outside seeding.
 * For more random results, use prandom_bytes().
 */
void prandom_bytes_state(struct rnd_state *state, void *buf, size_t bytes)
{
        u8 *ptr = buf;

        while (bytes >= sizeof(u32)) {
                put_unaligned(prandom_u32_state(state), (u32 *) ptr);
                ptr += sizeof(u32);
                bytes -= sizeof(u32);
        }

        if (bytes > 0) {
                u32 rem = prandom_u32_state(state);
                do {
                        *ptr++ = (u8) rem;
                        bytes--;
                        rem >>= BITS_PER_BYTE;
                } while (bytes > 0);
        }
}
EXPORT_SYMBOL(prandom_bytes_state);

/**
 * prandom_bytes - get the requested number of pseudo-random bytes
 * @buf: where to copy the pseudo-random bytes to
 * @bytes: the requested number of bytes
 */
void prandom_bytes(void *buf, size_t bytes)
{
        struct rnd_state *state = &get_cpu_var(net_rand_state);

        prandom_bytes_state(state, buf, bytes);
        put_cpu_var(state);
}
EXPORT_SYMBOL(prandom_bytes);
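
/*
 * Usage sketch (disabled): an odd-sized request exercises both paths above,
 * e.g. a 7-byte buffer consumes one full 32-bit word via put_unaligned()
 * and then peels three tail bytes off one extra draw.
 */
#if 0
static void example_odd_length_fill(struct rnd_state *state)
{
        u8 cookie[7];   /* two prandom_u32_state() draws in total */

        prandom_bytes_state(state, cookie, sizeof(cookie));
}
#endif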

static void prandom_warmup(struct rnd_state *state)
{
        /* Call the RNG ten times to satisfy the recurrence condition */
        prandom_u32_state(state);
        prandom_u32_state(state);
        prandom_u32_state(state);
        prandom_u32_state(state);
        prandom_u32_state(state);
        prandom_u32_state(state);
        prandom_u32_state(state);
        prandom_u32_state(state);
        prandom_u32_state(state);
        prandom_u32_state(state);
}

static u32 __extract_hwseed(void)
{
        unsigned int val = 0;

        (void)(arch_get_random_seed_int(&val) ||
               arch_get_random_int(&val));

        return val;
}

static void prandom_seed_early(struct rnd_state *state, u32 seed,
                               bool mix_with_hwseed)
{
#define LCG(x) ((x) * 69069U) /* super-duper LCG */
#define HWSEED() (mix_with_hwseed ? __extract_hwseed() : 0)
        state->s1 = __seed(HWSEED() ^ LCG(seed), 2U);
        state->s2 = __seed(HWSEED() ^ LCG(state->s1), 8U);
        state->s3 = __seed(HWSEED() ^ LCG(state->s2), 16U);
        state->s4 = __seed(HWSEED() ^ LCG(state->s3), 128U);
}
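
/*
 * Sketch of why the lfsr113 seeding constraint holds (disabled), assuming
 * __seed() from <linux/random.h> maps any value below its threshold m to
 * x + m: even a degenerate all-zero LCG chain ends up with
 * s1 > 1, s2 > 7, s3 > 15, s4 > 127.
 */
#if 0
static void example_seed_bounds(void)
{
        struct rnd_state state;

        /* worst case: seed 0 and no hardware mixing */
        prandom_seed_early(&state, 0U, false);

        /* never fires */
        WARN_ON(state.s1 < 2U || state.s2 < 8U ||
                state.s3 < 16U || state.s4 < 128U);
}
#endif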

/**
 * prandom_seed - add entropy to pseudo random number generator
 * @entropy: entropy value to mix in
 *
 * Add some additional seeding to the prandom pool.
 */
void prandom_seed(u32 entropy)
{
        int i;
        /*
         * No locking on the CPUs, but then somewhat random results are, well,
         * expected.
         */
        for_each_possible_cpu(i) {
                struct rnd_state *state = &per_cpu(net_rand_state, i);

                state->s1 = __seed(state->s1 ^ entropy, 2U);
                prandom_warmup(state);
        }
}
EXPORT_SYMBOL(prandom_seed);
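
/*
 * Usage sketch (disabled, purely hypothetical caller and names): a driver
 * could stir a few device-specific bytes, e.g. part of a hardware address,
 * into the per-CPU pool; __seed() keeps s1 valid even if the XOR is tiny.
 */
#if 0
static void example_mix_device_entropy(const u8 hwaddr[6])
{
        prandom_seed(get_unaligned((const u32 *)(hwaddr + 2)));
}
#endif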

/*
 * Generate some initially weak seeding values so that the
 * prandom_u32() engine can be started.
 */
static int __init prandom_init(void)
{
        int i;

        prandom_state_selftest();

        for_each_possible_cpu(i) {
                struct rnd_state *state = &per_cpu(net_rand_state, i);
                u32 weak_seed = (i + jiffies) ^ random_get_entropy();

                prandom_seed_early(state, weak_seed, true);
                prandom_warmup(state);
        }

        return 0;
}
core_initcall(prandom_init);

static void __prandom_timer(unsigned long dontcare);

static DEFINE_TIMER(seed_timer, __prandom_timer, 0, 0);

static void __prandom_timer(unsigned long dontcare)
{
        u32 entropy;
        unsigned long expires;

        get_random_bytes(&entropy, sizeof(entropy));
        prandom_seed(entropy);

        /* reseed every ~60 seconds, in [40 .. 80) interval with slack */
        expires = 40 + prandom_u32_max(40);
        seed_timer.expires = jiffies + msecs_to_jiffies(expires * MSEC_PER_SEC);

        add_timer(&seed_timer);
}

static void __init __prandom_start_seed_timer(void)
{
        set_timer_slack(&seed_timer, HZ);
        seed_timer.expires = jiffies + msecs_to_jiffies(40 * MSEC_PER_SEC);
        add_timer(&seed_timer);
}

void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state)
{
        int i;

        for_each_possible_cpu(i) {
                struct rnd_state *state = per_cpu_ptr(pcpu_state, i);
                u32 seeds[4];

                get_random_bytes(&seeds, sizeof(seeds));
                state->s1 = __seed(seeds[0], 2U);
                state->s2 = __seed(seeds[1], 8U);
                state->s3 = __seed(seeds[2], 16U);
                state->s4 = __seed(seeds[3], 128U);

                prandom_warmup(state);
        }
}
EXPORT_SYMBOL(prandom_seed_full_state);
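
/*
 * Usage sketch (disabled, the names are hypothetical): a caller that keeps
 * its own per-CPU generator can seed the whole state from get_random_bytes()
 * in one call instead of mixing in a single 32-bit value.
 */
#if 0
static DEFINE_PER_CPU(struct rnd_state, example_rnd_state);

static void example_init_private_prng(void)
{
        prandom_seed_full_state(&example_rnd_state);
}
#endif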

/*
 * Generate better values after the random number generator
 * is fully initialized.
 */
static void __prandom_reseed(bool late)
{
        unsigned long flags;
        static bool latch = false;
        static DEFINE_SPINLOCK(lock);

        /* Asking for random bytes might result in bytes getting
         * moved into the nonblocking pool and thus marking it
         * as initialized. In this case we would double back into
         * this function and attempt to do a late reseed.
         * Ignore the pointless attempt to reseed again if we're
         * already waiting for bytes when the nonblocking pool
         * got initialized.
         */

        /* only allow initial seeding (late == false) once */
        if (!spin_trylock_irqsave(&lock, flags))
                return;

        if (latch && !late)
                goto out;

        latch = true;
        prandom_seed_full_state(&net_rand_state);
out:
        spin_unlock_irqrestore(&lock, flags);
}

void prandom_reseed_late(void)
{
        __prandom_reseed(true);
}

static int __init prandom_reseed(void)
{
        __prandom_reseed(false);
        __prandom_start_seed_timer();
        return 0;
}
late_initcall(prandom_reseed);

#ifdef CONFIG_RANDOM32_SELFTEST
static struct prandom_test1 {
        u32 seed;
        u32 result;
} test1[] = {
        { 1U, 3484351685U },
        { 2U, 2623130059U },
        { 3U, 3125133893U },
        { 4U, 984847254U },
};

static struct prandom_test2 {
        u32 seed;
        u32 iteration;
        u32 result;
} test2[] = {
        /* Test cases against taus113 from GSL library. */
        { 931557656U, 959U, 2975593782U },
        { 1339693295U, 876U, 3887776532U },
        { 1545556285U, 961U, 1615538833U },
        { 601730776U, 723U, 1776162651U },
        { 1027516047U, 687U, 511983079U },
        { 416526298U, 700U, 916156552U },
        { 1395522032U, 652U, 2222063676U },
        { 366221443U, 617U, 2992857763U },
        { 1539836965U, 714U, 3783265725U },
        { 556206671U, 994U, 799626459U },
        { 684907218U, 799U, 367789491U },
        { 2121230701U, 931U, 2115467001U },
        { 1668516451U, 644U, 3620590685U },
        { 768046066U, 883U, 2034077390U },
        { 1989159136U, 833U, 1195767305U },
        { 536585145U, 996U, 3577259204U },
        { 1008129373U, 642U, 1478080776U },
        { 1740775604U, 939U, 1264980372U },
        { 1967883163U, 508U, 10734624U },
        { 1923019697U, 730U, 3821419629U },
        { 442079932U, 560U, 3440032343U },
        { 1961302714U, 845U, 841962572U },
        { 2030205964U, 962U, 1325144227U },
        { 1160407529U, 507U, 240940858U },
        { 635482502U, 779U, 4200489746U },
        { 1252788931U, 699U, 867195434U },
        { 1961817131U, 719U, 668237657U },
        { 1071468216U, 983U, 917876630U },
        { 1281848367U, 932U, 1003100039U },
        { 582537119U, 780U, 1127273778U },
        { 1973672777U, 853U, 1071368872U },
        { 1896756996U, 762U, 1127851055U },
        { 847917054U, 500U, 1717499075U },
        { 1240520510U, 951U, 2849576657U },
        { 1685071682U, 567U, 1961810396U },
        { 1516232129U, 557U, 3173877U },
        { 1208118903U, 612U, 1613145022U },
        { 1817269927U, 693U, 4279122573U },
        { 1510091701U, 717U, 638191229U },
        { 365916850U, 807U, 600424314U },
        { 399324359U, 702U, 1803598116U },
        { 1318480274U, 779U, 2074237022U },
        { 697758115U, 840U, 1483639402U },
        { 1696507773U, 840U, 577415447U },
        { 2081979121U, 981U, 3041486449U },
        { 955646687U, 742U, 3846494357U },
        { 1250683506U, 749U, 836419859U },
        { 595003102U, 534U, 366794109U },
        { 47485338U, 558U, 3521120834U },
        { 619433479U, 610U, 3991783875U },
        { 704096520U, 518U, 4139493852U },
        { 1712224984U, 606U, 2393312003U },
        { 1318233152U, 922U, 3880361134U },
        { 855572992U, 761U, 1472974787U },
        { 64721421U, 703U, 683860550U },
        { 678931758U, 840U, 380616043U },
        { 692711973U, 778U, 1382361947U },
        { 677703619U, 530U, 2826914161U },
        { 92393223U, 586U, 1522128471U },
        { 1222592920U, 743U, 3466726667U },
        { 358288986U, 695U, 1091956998U },
        { 1935056945U, 958U, 514864477U },
        { 735675993U, 990U, 1294239989U },
        { 1560089402U, 897U, 2238551287U },
        { 70616361U, 829U, 22483098U },
        { 368234700U, 731U, 2913875084U },
        { 20221190U, 879U, 1564152970U },
        { 539444654U, 682U, 1835141259U },
        { 1314987297U, 840U, 1801114136U },
        { 2019295544U, 645U, 3286438930U },
        { 469023838U, 716U, 1637918202U },
        { 1843754496U, 653U, 2562092152U },
        { 400672036U, 809U, 4264212785U },
        { 404722249U, 965U, 2704116999U },
        { 600702209U, 758U, 584979986U },
        { 519953954U, 667U, 2574436237U },
        { 1658071126U, 694U, 2214569490U },
        { 420480037U, 749U, 3430010866U },
        { 690103647U, 969U, 3700758083U },
        { 1029424799U, 937U, 3787746841U },
        { 2012608669U, 506U, 3362628973U },
        { 1535432887U, 998U, 42610943U },
        { 1330635533U, 857U, 3040806504U },
        { 1223800550U, 539U, 3954229517U },
        { 1322411537U, 680U, 3223250324U },
        { 1877847898U, 945U, 2915147143U },
        { 1646356099U, 874U, 965988280U },
        { 805687536U, 744U, 4032277920U },
        { 1948093210U, 633U, 1346597684U },
        { 392609744U, 783U, 1636083295U },
        { 690241304U, 770U, 1201031298U },
        { 1360302965U, 696U, 1665394461U },
        { 1220090946U, 780U, 1316922812U },
        { 447092251U, 500U, 3438743375U },
        { 1613868791U, 592U, 828546883U },
        { 523430951U, 548U, 2552392304U },
        { 726692899U, 810U, 1656872867U },
        { 1364340021U, 836U, 3710513486U },
        { 1986257729U, 931U, 935013962U },
        { 407983964U, 921U, 728767059U },
};

static void __init prandom_state_selftest(void)
{
        int i, j, errors = 0, runs = 0;
        bool error = false;

        for (i = 0; i < ARRAY_SIZE(test1); i++) {
                struct rnd_state state;

                prandom_seed_early(&state, test1[i].seed, false);
                prandom_warmup(&state);

                if (test1[i].result != prandom_u32_state(&state))
                        error = true;
        }

        if (error)
                pr_warn("prandom: seed boundary self test failed\n");
        else
                pr_info("prandom: seed boundary self test passed\n");

        for (i = 0; i < ARRAY_SIZE(test2); i++) {
                struct rnd_state state;

                prandom_seed_early(&state, test2[i].seed, false);
                prandom_warmup(&state);

                for (j = 0; j < test2[i].iteration - 1; j++)
                        prandom_u32_state(&state);

                if (test2[i].result != prandom_u32_state(&state))
                        errors++;

                runs++;
                cond_resched();
        }

        if (errors)
                pr_warn("prandom: %d/%d self tests failed\n", errors, runs);
        else
                pr_info("prandom: %d self tests passed\n", runs);
}
#endif