/*
 * extract_buf - hash the entropy pool state to produce output bytes.
 *
 * @r:   the entropy store (pool) to extract from
 * @out: destination for the extracted bytes
 *
 * NOTE(review): the function is truncated in this chunk (the trailing
 * "..." elides the remainder, including the final transform and the copy
 * into @out), so only the visible portion is documented.  Code is left
 * byte-identical; see the BUG note below.
 */
static void extract_buf(struct entropy_store *r, __u8 *out)
{
	int i;
	/* SHA-1 state, viewable as 32-bit words or as longs for XOR-mixing */
	union {
		__u32 w[5];
		unsigned long l[LONGS(20)];
	} hash;
	__u32 workspace[SHA_WORKSPACE_WORDS];
	__u8 extract[64];
	unsigned long flags;

	/*
	 * If we have an architectural hardware random number
	 * generator, collect its output here so it can be mixed
	 * into the hash below.
	 */
	spin_lock_irqsave(&r->lock, flags);
	for (i = 0; i < LONGS(20); i++) {
		unsigned long v;
		/*
		 * BUG(review): rdrand_array is declared inside the loop
		 * body, so it goes out of scope at the closing brace; the
		 * XOR loop further down references an undeclared
		 * identifier and this will not compile as written (and
		 * even if it did, each iteration would see a fresh,
		 * uninitialized array).  It must be declared at function
		 * scope.  Additionally, if arch_get_random_long() fails
		 * partway, the 'break' leaves the remaining slots
		 * uninitialized, and the XOR below would read them.
		 * (Original comment, translated from Russian: "dump it
		 * into its own pile".)
		 */
		unsigned long rdrand_array[LONGS(20)];
		if (!arch_get_random_long(&v))
			break;
		rdrand_array[i] = v;
	}
	spin_unlock_irqrestore(&r->lock, flags);

	/* Generate a hash across the pool, 16 words (512 bits) at a time */
	sha_init(hash.w);
	spin_lock_irqsave(&r->lock, flags);
	for (i = 0; i < r->poolinfo->poolwords; i += 16)
		sha_transform(hash.w, (__u8 *)(r->pool + i), workspace);
	/*
	 * Fold the hardware-RNG output into the hash state.
	 * (Original comment, translated from Russian: "and combine
	 * them here".)
	 * NOTE(review): see BUG above -- rdrand_array is not in scope
	 * at this point as written.
	 */
	for (i = 0; i < LONGS(20); i++)
		hash.l[i] ^= rdrand_array[i];
	/*
	 * We mix the hash back into the pool to prevent backtracking
	 * attacks (where the attacker knows the state of the pool
	 * plus the current outputs, and attempts to find previous
	 * outputs), unless the hash function can be inverted. By
	 * mixing at least a SHA1 worth of hash data back, we make
	 * brute-forcing the feedback as hard as brute-forcing the
	 * hash.
	 */
	__mix_pool_bytes(r, hash.w, sizeof(hash.w), extract);
	spin_unlock_irqrestore(&r->lock, flags);
	...