Diffstat (limited to 'src')
-rw-r--r--  src/crypto/slow-hash.c | 398
1 file changed, 288 insertions, 110 deletions
diff --git a/src/crypto/slow-hash.c b/src/crypto/slow-hash.c
index 35438dcab..cda66af52 100644
--- a/src/crypto/slow-hash.c
+++ b/src/crypto/slow-hash.c
@@ -13,8 +13,9 @@
#include <emmintrin.h>
-#if defined(_MSC_VER) || defined(__INTEL_COMPILER)
+#if defined(_MSC_VER)
#include <intrin.h>
+#include <Windows.h>
#define STATIC
#define INLINE __inline
#if !defined(RDATA_ALIGN16)
@@ -22,6 +23,7 @@
#endif
#else
#include <wmmintrin.h>
+#include <sys/mman.h>
#define STATIC static
#define INLINE inline
#if !defined(RDATA_ALIGN16)
@@ -29,15 +31,58 @@
#endif
#endif
+#if defined(__INTEL_COMPILER)
+#define ASM __asm__
+#elif !defined(_MSC_VER)
+#define ASM __asm__
+#else
+#define ASM __asm
+#endif
+
#define MEMORY (1 << 21) // 2MB scratchpad
#define ITER (1 << 20)
#define AES_BLOCK_SIZE 16
#define AES_KEY_SIZE 32
#define INIT_SIZE_BLK 8
#define INIT_SIZE_BYTE (INIT_SIZE_BLK * AES_BLOCK_SIZE)
+#define TOTALBLOCKS (MEMORY / AES_BLOCK_SIZE)
#define U64(x) ((uint64_t *) (x))
#define R128(x) ((__m128i *) (x))
+#define SWAP(a, b) (((a) -= (b)), ((b) += (a)), ((a) = (b) - (a)))
+
+#define state_index(x) (((*((uint64_t *)x) >> 4) & (TOTALBLOCKS - 1)) << 4)
+#if defined(_MSC_VER)
+#define __mul() lo = _umul128(c[0], b[0], &hi);
+#else
+#define __mul() ASM("mulq %3\n\t" : "=d"(hi), "=a"(lo) : "%a" (c[0]), "rm" (b[0]) : "cc");
+#endif
+
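The __mul() macro computes the full 64x64 -> 128-bit product: x86-64 mulq leaves the high half in RDX ("=d"(hi)) and the low half in RAX ("=a"(lo)). A portable sketch of the same operation, assuming a GCC/Clang-style compiler with unsigned __int128 (illustrative only, not part of the patch):

    #include <stdint.h>

    /* Portable stand-in for __mul(): full 64x64 -> 128-bit multiply. */
    static inline void mul128_portable(uint64_t a, uint64_t b,
                                       uint64_t *hi, uint64_t *lo)
    {
        unsigned __int128 r = (unsigned __int128) a * b;
        *hi = (uint64_t) (r >> 64); /* what mulq leaves in RDX */
        *lo = (uint64_t) r;         /* what mulq leaves in RAX */
    }
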
+#define pre_aes() \
+ j = state_index(a); \
+ _c = _mm_load_si128(R128(&hp_state[j])); \
+ _a = _mm_load_si128(R128(a)); \
+
+// dga's optimized scratchpad twiddling
+#define post_aes() \
+ _mm_store_si128(R128(c), _c); \
+ _b = _mm_xor_si128(_b, _c); \
+ _mm_store_si128(R128(&hp_state[j]), _b); \
+ j = state_index(c); \
+ p = U64(&hp_state[j]); \
+ b[0] = p[0]; b[1] = p[1]; \
+ __mul(); \
+ a[0] += hi; a[1] += lo; \
+ p = U64(&hp_state[j]); \
+ p[0] = a[0]; p[1] = a[1]; \
+ a[0] ^= b[0]; a[1] ^= b[1]; \
+ _b = _c; \
+
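Taken together, one iteration of the main loop reads: pre_aes() derives scratchpad slot j from bits 4..20 of a and loads that block; one AES round keyed with a produces c; post_aes() writes b^c back to slot j, derives a second slot from c, multiplies c[0] by the 64-bit word read there, adds the 128-bit product into a, stores a at that slot, xors the read value into a, and rotates b <- c. A hypothetical sanity check of the index mapping (not part of the patch):

    #include <assert.h>
    #include <stdint.h>

    /* state_index() keeps 17 block bits, so the result is always a
       16-byte-aligned offset inside the 2MB scratchpad. */
    static void check_state_index(uint64_t w)
    {
        uint64_t x[2] = { w, 0 };
        uint64_t j = state_index(x);
        assert(j % AES_BLOCK_SIZE == 0); /* << 4 clears the low 4 bits */
        assert(j < MEMORY);              /* (2^17 - 1) << 4 < 2^21     */
    }
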
+#if defined(_MSC_VER)
+#define THREADV __declspec(thread)
+#else
+#define THREADV __thread
+#endif
extern int aesb_single_round(const uint8_t *in, uint8_t *out, const uint8_t *expandedKey);
extern int aesb_pseudo_round(const uint8_t *in, uint8_t *out, const uint8_t *expandedKey);
@@ -54,59 +99,26 @@ union cn_slow_hash_state
};
#pragma pack(pop)
-#if defined(_MSC_VER) || defined(__INTEL_COMPILER)
+THREADV uint8_t *hp_state = NULL;
+THREADV int hp_allocated = 0;
+
+#if defined(_MSC_VER)
#define cpuid(info,x) __cpuidex(info,x,0)
#else
void cpuid(int CPUInfo[4], int InfoType)
{
- __asm__ __volatile__
+ ASM __volatile__
(
- "cpuid":
+ "cpuid":
"=a" (CPUInfo[0]),
"=b" (CPUInfo[1]),
"=c" (CPUInfo[2]),
"=d" (CPUInfo[3]) :
- "a" (InfoType), "c" (0)
- );
+ "a" (InfoType), "c" (0)
+ );
}
#endif
-STATIC INLINE void mul(const uint8_t *a, const uint8_t *b, uint8_t *res)
-{
- uint64_t a0, b0;
- uint64_t hi, lo;
-
- a0 = U64(a)[0];
- b0 = U64(b)[0];
- lo = mul128(a0, b0, &hi);
- U64(res)[0] = hi;
- U64(res)[1] = lo;
-}
-
-STATIC INLINE void sum_half_blocks(uint8_t *a, const uint8_t *b)
-{
- uint64_t a0, a1, b0, b1;
- a0 = U64(a)[0];
- a1 = U64(a)[1];
- b0 = U64(b)[0];
- b1 = U64(b)[1];
- a0 += b0;
- a1 += b1;
- U64(a)[0] = a0;
- U64(a)[1] = a1;
-}
-
-STATIC INLINE void swap_blocks(uint8_t *a, uint8_t *b)
-{
- uint64_t t[2];
- U64(t)[0] = U64(a)[0];
- U64(t)[1] = U64(a)[1];
- U64(a)[0] = U64(b)[0];
- U64(a)[1] = U64(b)[1];
- U64(b)[0] = U64(t)[0];
- U64(b)[1] = U64(t)[1];
-}
-
STATIC INLINE void xor_blocks(uint8_t *a, const uint8_t *b)
{
U64(a)[0] ^= U64(b)[0];
@@ -125,74 +137,248 @@ STATIC INLINE int check_aes_hw(void)
return supported = cpuid_results[2] & (1 << 25);
}
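
check_aes_hw() queries CPUID leaf 1, where ECX bit 25 is the AES-NI feature flag. A minimal standalone probe using the same cpuid() wrapper (sketch, not part of the patch):

    /* Detect AES-NI exactly as check_aes_hw() does. */
    int info[4];
    cpuid(info, 1);                      /* leaf 1: processor feature flags */
    int has_aesni = (info[2] >> 25) & 1; /* ECX bit 25 = AES-NI             */
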
-STATIC INLINE void aesni_pseudo_round(const uint8_t *in, uint8_t *out,
- const uint8_t *expandedKey)
+STATIC INLINE void aes_256_assist1(__m128i* t1, __m128i * t2)
+{
+ __m128i t4;
+ *t2 = _mm_shuffle_epi32(*t2, 0xff);
+ t4 = _mm_slli_si128(*t1, 0x04);
+ *t1 = _mm_xor_si128(*t1, t4);
+ t4 = _mm_slli_si128(t4, 0x04);
+ *t1 = _mm_xor_si128(*t1, t4);
+ t4 = _mm_slli_si128(t4, 0x04);
+ *t1 = _mm_xor_si128(*t1, t4);
+ *t1 = _mm_xor_si128(*t1, *t2);
+}
+
+STATIC INLINE void aes_256_assist2(__m128i* t1, __m128i * t3)
+{
+ __m128i t2, t4;
+ t4 = _mm_aeskeygenassist_si128(*t1, 0x00);
+ t2 = _mm_shuffle_epi32(t4, 0xaa);
+ t4 = _mm_slli_si128(*t3, 0x04);
+ *t3 = _mm_xor_si128(*t3, t4);
+ t4 = _mm_slli_si128(t4, 0x04);
+ *t3 = _mm_xor_si128(*t3, t4);
+ t4 = _mm_slli_si128(t4, 0x04);
+ *t3 = _mm_xor_si128(*t3, t4);
+ *t3 = _mm_xor_si128(*t3, t2);
+}
+
+STATIC INLINE void aes_expand_key(const uint8_t *key, uint8_t *expandedKey)
+{
+ __m128i *ek = R128(expandedKey);
+ __m128i t1, t2, t3;
+
+ t1 = _mm_loadu_si128(R128(key));
+ t3 = _mm_loadu_si128(R128(key + 16));
+
+ ek[0] = t1;
+ ek[1] = t3;
+
+ t2 = _mm_aeskeygenassist_si128(t3, 0x01);
+ aes_256_assist1(&t1, &t2);
+ ek[2] = t1;
+ aes_256_assist2(&t1, &t3);
+ ek[3] = t3;
+
+ t2 = _mm_aeskeygenassist_si128(t3, 0x02);
+ aes_256_assist1(&t1, &t2);
+ ek[4] = t1;
+ aes_256_assist2(&t1, &t3);
+ ek[5] = t3;
+
+ t2 = _mm_aeskeygenassist_si128(t3, 0x04);
+ aes_256_assist1(&t1, &t2);
+ ek[6] = t1;
+ aes_256_assist2(&t1, &t3);
+ ek[7] = t3;
+
+ t2 = _mm_aeskeygenassist_si128(t3, 0x08);
+ aes_256_assist1(&t1, &t2);
+ ek[8] = t1;
+ aes_256_assist2(&t1, &t3);
+ ek[9] = t3;
+
+ t2 = _mm_aeskeygenassist_si128(t3, 0x10);
+ aes_256_assist1(&t1, &t2);
+ ek[10] = t1;
+}
+
+STATIC INLINE void aes_pseudo_round(const uint8_t *in, uint8_t *out,
+ const uint8_t *expandedKey, int nblocks)
+{
+ __m128i *k = R128(expandedKey);
+ __m128i d;
+ int i;
+
+ for(i = 0; i < nblocks; i++)
+ {
+ d = _mm_loadu_si128(R128(in + i * AES_BLOCK_SIZE));
+ d = _mm_aesenc_si128(d, *R128(&k[0]));
+ d = _mm_aesenc_si128(d, *R128(&k[1]));
+ d = _mm_aesenc_si128(d, *R128(&k[2]));
+ d = _mm_aesenc_si128(d, *R128(&k[3]));
+ d = _mm_aesenc_si128(d, *R128(&k[4]));
+ d = _mm_aesenc_si128(d, *R128(&k[5]));
+ d = _mm_aesenc_si128(d, *R128(&k[6]));
+ d = _mm_aesenc_si128(d, *R128(&k[7]));
+ d = _mm_aesenc_si128(d, *R128(&k[8]));
+ d = _mm_aesenc_si128(d, *R128(&k[9]));
+ _mm_storeu_si128((R128(out + i * AES_BLOCK_SIZE)), d);
+ }
+}
+
+STATIC INLINE void aes_pseudo_round_xor(const uint8_t *in, uint8_t *out,
+ const uint8_t *expandedKey, const uint8_t *xor, int nblocks)
{
__m128i *k = R128(expandedKey);
+ __m128i *x = R128(xor);
__m128i d;
+ int i;
+
+ for(i = 0; i < nblocks; i++)
+ {
+ d = _mm_loadu_si128(R128(in + i * AES_BLOCK_SIZE));
+ d = _mm_xor_si128(d, *R128(x++));
+ d = _mm_aesenc_si128(d, *R128(&k[0]));
+ d = _mm_aesenc_si128(d, *R128(&k[1]));
+ d = _mm_aesenc_si128(d, *R128(&k[2]));
+ d = _mm_aesenc_si128(d, *R128(&k[3]));
+ d = _mm_aesenc_si128(d, *R128(&k[4]));
+ d = _mm_aesenc_si128(d, *R128(&k[5]));
+ d = _mm_aesenc_si128(d, *R128(&k[6]));
+ d = _mm_aesenc_si128(d, *R128(&k[7]));
+ d = _mm_aesenc_si128(d, *R128(&k[8]));
+ d = _mm_aesenc_si128(d, *R128(&k[9]));
+ _mm_storeu_si128((R128(out + i * AES_BLOCK_SIZE)), d);
+ }
+}
+
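Note that aes_expand_key() fills eleven round keys (ek[0..10]) but the pseudo-rounds apply only ek[0..9] via aesenc, with no initial whitening and no final aesenclast: this is CryptoNight's keyed permutation, not standard AES-256 encryption. A hypothetical driver showing the intended call pattern (key32, buf, and pad are caller-supplied stand-ins):

    RDATA_ALIGN16 uint8_t expandedKey[240];
    uint8_t buf[INIT_SIZE_BYTE];            /* 8 x 16-byte blocks      */
    uint8_t pad[INIT_SIZE_BYTE];            /* data to fold in via XOR */

    aes_expand_key(key32, expandedKey);     /* key32: 32-byte AES key  */
    aes_pseudo_round(buf, buf, expandedKey, INIT_SIZE_BLK);
    aes_pseudo_round_xor(buf, buf, expandedKey, pad, INIT_SIZE_BLK);
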
+#if defined(_MSC_VER)
+BOOL SetLockPagesPrivilege(HANDLE hProcess, BOOL bEnable)
+{
+ struct
+ {
+ DWORD count;
+ LUID_AND_ATTRIBUTES privilege[1];
+ } info;
+
+ HANDLE token;
+ if(!OpenProcessToken(hProcess, TOKEN_ADJUST_PRIVILEGES, &token))
+ return FALSE;
+
+ info.count = 1;
+ info.privilege[0].Attributes = bEnable ? SE_PRIVILEGE_ENABLED : 0;
- d = _mm_loadu_si128(R128(in));
- d = _mm_aesenc_si128(d, *R128(&k[0]));
- d = _mm_aesenc_si128(d, *R128(&k[1]));
- d = _mm_aesenc_si128(d, *R128(&k[2]));
- d = _mm_aesenc_si128(d, *R128(&k[3]));
- d = _mm_aesenc_si128(d, *R128(&k[4]));
- d = _mm_aesenc_si128(d, *R128(&k[5]));
- d = _mm_aesenc_si128(d, *R128(&k[6]));
- d = _mm_aesenc_si128(d, *R128(&k[7]));
- d = _mm_aesenc_si128(d, *R128(&k[8]));
- d = _mm_aesenc_si128(d, *R128(&k[9]));
- _mm_storeu_si128((R128(out)), d);
+ if(!LookupPrivilegeValue(NULL, SE_LOCK_MEMORY_NAME, &(info.privilege[0].Luid)))
+ {
+ CloseHandle(token); // don't leak the token handle on failure
+ return FALSE;
+ }
+
+ // AdjustTokenPrivileges can return TRUE without granting the privilege
+ // (ERROR_NOT_ALL_ASSIGNED), so GetLastError() must be checked as well.
+ if(!AdjustTokenPrivileges(token, FALSE, (PTOKEN_PRIVILEGES) &info, 0, NULL, NULL)
+    || GetLastError() != ERROR_SUCCESS)
+ {
+ CloseHandle(token);
+ return FALSE;
+ }
+
+ CloseHandle(token);
+ return TRUE;
+}
+#endif
+
+void slow_hash_allocate_state(void)
+{
+ if(hp_state != NULL)
+ return;
+
+#if defined(_MSC_VER)
+ SetLockPagesPrivilege(GetCurrentProcess(), TRUE);
+ hp_state = (uint8_t *) VirtualAlloc(hp_state, MEMORY, MEM_LARGE_PAGES |
+ MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
+#else
+ hp_state = mmap(0, MEMORY, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0); // fd -1: portable idiom for MAP_ANONYMOUS
+ if(hp_state == MAP_FAILED)
+ hp_state = NULL;
+#endif
+ hp_allocated = 1;
+ if(hp_state == NULL)
+ {
+ hp_allocated = 0;
+ hp_state = (uint8_t *) malloc(MEMORY);
+ }
+}
+
+void slow_hash_free_state(void)
+{
+ if(hp_state == NULL)
+ return;
+
+ if(!hp_allocated)
+ free(hp_state);
+ else
+ {
+#if defined(_MSC_VER)
+ VirtualFree(hp_state, 0, MEM_RELEASE); // MEM_RELEASE requires dwSize == 0
+#else
+ munmap(hp_state, MEMORY);
+#endif
+ }
+
+ hp_state = NULL;
+ hp_allocated = 0;
}
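
With the scratchpad now thread-local and reusable across hashes, the intended per-thread lifecycle looks like this (hypothetical caller; data and data_len are stand-ins):

    char hash[32];                       /* 256-bit result                      */
    slow_hash_allocate_state();          /* huge pages if possible, else malloc */
    cn_slow_hash(data, data_len, hash);  /* may be called many times            */
    slow_hash_free_state();              /* release when the thread is done     */
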
void cn_slow_hash(const void *data, size_t length, char *hash)
{
- uint8_t long_state[MEMORY];
- uint8_t text[INIT_SIZE_BYTE];
- uint8_t a[AES_BLOCK_SIZE];
- uint8_t b[AES_BLOCK_SIZE];
- uint8_t d[AES_BLOCK_SIZE];
- uint8_t aes_key[AES_KEY_SIZE];
- RDATA_ALIGN16 uint8_t expandedKey[256];
+ RDATA_ALIGN16 uint8_t expandedKey[240];
+ uint8_t text[INIT_SIZE_BYTE];
+ RDATA_ALIGN16 uint64_t a[2];
+ RDATA_ALIGN16 uint64_t b[2];
+ RDATA_ALIGN16 uint64_t c[2];
+ RDATA_ALIGN16 uint8_t aes_key[AES_KEY_SIZE];
union cn_slow_hash_state state;
+ __m128i _a, _b, _c;
+ uint64_t hi, lo;
size_t i, j;
- uint8_t *p = NULL;
+ uint64_t *p = NULL;
oaes_ctx *aes_ctx;
-
int useAes = check_aes_hw();
+
static void (*const extra_hashes[4])(const void *, size_t, char *) =
{
hash_extra_blake, hash_extra_groestl, hash_extra_jh, hash_extra_skein
};
+ // this isn't supposed to happen, but guard against it for now.
+ if(hp_state == NULL)
+ slow_hash_allocate_state();
+
hash_process(&state.hs, data, length);
memcpy(text, state.init, INIT_SIZE_BYTE);
- aes_ctx = (oaes_ctx *) oaes_alloc();
- oaes_key_import_data(aes_ctx, state.hs.b, AES_KEY_SIZE);
-
- // use aligned data
- memcpy(expandedKey, aes_ctx->key->exp_data, aes_ctx->key->exp_data_len);
-
if(useAes)
{
+ aes_expand_key(state.hs.b, expandedKey);
for(i = 0; i < MEMORY / INIT_SIZE_BYTE; i++)
{
- for(j = 0; j < INIT_SIZE_BLK; j++)
- aesni_pseudo_round(&text[AES_BLOCK_SIZE * j], &text[AES_BLOCK_SIZE * j], expandedKey);
- memcpy(&long_state[i * INIT_SIZE_BYTE], text, INIT_SIZE_BYTE);
+ aes_pseudo_round(text, text, expandedKey, INIT_SIZE_BLK);
+ memcpy(&hp_state[i * INIT_SIZE_BYTE], text, INIT_SIZE_BYTE);
}
}
else
{
+ aes_ctx = (oaes_ctx *) oaes_alloc();
+ oaes_key_import_data(aes_ctx, state.hs.b, AES_KEY_SIZE);
for(i = 0; i < MEMORY / INIT_SIZE_BYTE; i++)
{
for(j = 0; j < INIT_SIZE_BLK; j++)
- aesb_pseudo_round(&text[AES_BLOCK_SIZE * j], &text[AES_BLOCK_SIZE * j], expandedKey);
+ aesb_pseudo_round(&text[AES_BLOCK_SIZE * j], &text[AES_BLOCK_SIZE * j], aes_ctx->key->exp_data);
- memcpy(&long_state[i * INIT_SIZE_BYTE], text, INIT_SIZE_BYTE);
+ memcpy(&hp_state[i * INIT_SIZE_BYTE], text, INIT_SIZE_BYTE);
}
}
@@ -201,60 +387,52 @@ void cn_slow_hash(const void *data, size_t length, char *hash)
U64(b)[0] = U64(&state.k[16])[0] ^ U64(&state.k[48])[0];
U64(b)[1] = U64(&state.k[16])[1] ^ U64(&state.k[48])[1];
- for(i = 0; i < ITER / 2; i++)
+ _b = _mm_load_si128(R128(b));
+ // branching inside this hot loop is measurably slower, so the useAes check is hoisted outside it.
+ if(useAes)
{
- #define TOTALBLOCKS (MEMORY / AES_BLOCK_SIZE)
- #define state_index(x) (((*((uint64_t *)x) >> 4) & (TOTALBLOCKS - 1)) << 4)
-
- // Iteration 1
- p = &long_state[state_index(a)];
-
- if(useAes)
- _mm_storeu_si128(R128(p), _mm_aesenc_si128(_mm_loadu_si128(R128(p)), _mm_loadu_si128(R128(a))));
- else
- aesb_single_round(p, p, a);
-
- xor_blocks(b, p);
- swap_blocks(b, p);
- swap_blocks(a, b);
-
- // Iteration 2
- p = &long_state[state_index(a)];
-
- mul(a, p, d);
- sum_half_blocks(b, d);
- swap_blocks(b, p);
- xor_blocks(b, p);
- swap_blocks(a, b);
+ for(i = 0; i < ITER / 2; i++)
+ {
+ pre_aes();
+ _c = _mm_aesenc_si128(_c, _a);
+ // post_aes(), optimized scratchpad twiddling (credits to dga)
+ post_aes();
+ }
+ }
+ else
+ {
+ for(i = 0; i < ITER / 2; i++)
+ {
+ pre_aes();
+ aesb_single_round((uint8_t *) &_c, (uint8_t *) &_c, (uint8_t *) &_a);
+ post_aes();
+ }
}
memcpy(text, state.init, INIT_SIZE_BYTE);
- oaes_key_import_data(aes_ctx, &state.hs.b[32], AES_KEY_SIZE);
- memcpy(expandedKey, aes_ctx->key->exp_data, aes_ctx->key->exp_data_len);
if(useAes)
{
+ aes_expand_key(&state.hs.b[32], expandedKey);
for(i = 0; i < MEMORY / INIT_SIZE_BYTE; i++)
{
- for(j = 0; j < INIT_SIZE_BLK; j++)
- {
- xor_blocks(&text[j * AES_BLOCK_SIZE], &long_state[i * INIT_SIZE_BYTE + j * AES_BLOCK_SIZE]);
- aesni_pseudo_round(&text[j * AES_BLOCK_SIZE], &text[j * AES_BLOCK_SIZE], expandedKey);
- }
+ // add the xor to the pseudo round
+ aes_pseudo_round_xor(text, text, expandedKey, &hp_state[i * INIT_SIZE_BYTE], INIT_SIZE_BLK);
}
}
else
{
+ oaes_key_import_data(aes_ctx, &state.hs.b[32], AES_KEY_SIZE);
for(i = 0; i < MEMORY / INIT_SIZE_BYTE; i++)
{
for(j = 0; j < INIT_SIZE_BLK; j++)
{
- xor_blocks(&text[j * AES_BLOCK_SIZE], &long_state[i * INIT_SIZE_BYTE + j * AES_BLOCK_SIZE]);
- aesb_pseudo_round(&text[AES_BLOCK_SIZE * j], &text[AES_BLOCK_SIZE * j], expandedKey);
+ xor_blocks(&text[j * AES_BLOCK_SIZE], &hp_state[i * INIT_SIZE_BYTE + j * AES_BLOCK_SIZE]);
+ aesb_pseudo_round(&text[AES_BLOCK_SIZE * j], &text[AES_BLOCK_SIZE * j], aes_ctx->key->exp_data);
}
}
+ oaes_free((OAES_CTX **) &aes_ctx);
}
- oaes_free((OAES_CTX **) &aes_ctx);
memcpy(state.init, text, INIT_SIZE_BYTE);
hash_permutation(&state.hs);
extra_hashes[state.hs.b[0] & 3](&state, 200, hash);