|
@@ -78,20 +78,10 @@ int cc20_crypt (unsigned char *out, const unsigned char *in, size_t in_len,
 
 #elif defined (__SSE2__) // SSE ----------------------------------------------------------
 
-// taken (and modified and enhanced) from
+// taken (and heavily modified and enhanced) from
 // https://github.com/Ginurx/chacha20-c (public domain)
 
-static void cc20_init_block(cc20_context_t *ctx, const uint8_t nonce[]) {
-
-    const uint8_t *magic_constant = (uint8_t*)"expand 32-byte k";
-
-    memcpy(&(ctx->state[ 0]), magic_constant, 16);
-    memcpy(&(ctx->state[ 4]), ctx->key, CC20_KEY_BYTES);
-    memcpy(&(ctx->state[12]), nonce, CC20_IV_SIZE);
-}
-
 #define SL _mm_slli_epi32
 #define SR _mm_srli_epi32
 #define XOR _mm_xor_si128
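
With cc20_init_block gone from the SSE path, the state is never staged in
ctx->state here anymore: cc20_crypt below builds the four 128-bit state rows
directly in registers from the constant, the key and the IV. A minimal sketch
of that row layout -- the helper name is illustrative, not part of the patch:

    #include <emmintrin.h>
    #include <stdint.h>

    // ChaCha20 state as four SSE rows: constant / key low / key high / counter+nonce
    static void cc20_load_rows(const uint8_t key[32], const uint8_t iv[16],
                               __m128i row[4]) {
        row[0] = _mm_loadu_si128((const __m128i*)"expand 32-byte k"); // words  0.. 3
        row[1] = _mm_loadu_si128((const __m128i*)key);                // words  4.. 7
        row[2] = _mm_loadu_si128((const __m128i*)(key + 16));         // words  8..11
        row[3] = _mm_loadu_si128((const __m128i*)iv);                 // words 12..15
    }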

@@ -99,15 +89,18 @@ static void cc20_init_block(cc20_context_t *ctx, const uint8_t nonce[]) {
 #define ADD _mm_add_epi32
 #define ROL(X,r) (XOR(SL(X,r),SR(X,(32-r))))
 
-#if defined (__SSE3__) // --- SSE3
+#define ONE _mm_setr_epi32(1, 0, 0, 0)
+#define TWO _mm_setr_epi32(2, 0, 0, 0)
+
+#if defined (__SSSE3__) // --- SSSE3
 #define L8 _mm_set_epi32(0x0e0d0c0fL, 0x0a09080bL, 0x06050407L, 0x02010003L)
 #define L16 _mm_set_epi32(0x0d0c0f0eL, 0x09080b0aL, 0x05040706L, 0x01000302L)
-#define ROL8(X) ( _mm_shuffle_epi8(X, L8)) /* SSE 3 */
-#define ROL16(X) ( _mm_shuffle_epi8(X, L16)) /* SSE 3 */
-#else // --- regular SSE2 --------
+#define ROL8(X) ( _mm_shuffle_epi8(X, L8)) /* SSSE 3 */
+#define ROL16(X) ( _mm_shuffle_epi8(X, L16)) /* SSSE 3 */
+#else // --- regular SSE2 ----------
 #define ROL8(X) ROL(X,8)
 #define ROL16(X) ROL(X,16)
-#endif // ------------------------
+#endif // --------------------------
 
 #define CC20_PERMUTE_ROWS(A,B,C,D) \
     B = _mm_shuffle_epi32(B, _MM_SHUFFLE(0, 3, 2, 1)); \
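
A note on the rotations: rotating a 32-bit lane by 8 or by 16 only moves whole
bytes, so with SSSE3 each of ROL8/ROL16 is a single _mm_shuffle_epi8 with the
byte masks L8/L16. The preprocessor guard is also corrected from __SSE3__ to
__SSSE3__ here, since _mm_shuffle_epi8 is an SSSE3 instruction, not SSE3. The
plain SSE2 fallback composes the rotation from two shifts and an XOR, which is
exactly what ROL computes per lane; the scalar equivalent, as an illustrative
helper:

    #include <stdint.h>

    // scalar view of ROL(X,r): a 32-bit left rotation built from two shifts
    static inline uint32_t rol32(uint32_t x, unsigned r) {
        return (x << r) | (x >> (32 - r));
    }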
|
@@ -135,22 +128,57 @@ static void cc20_init_block(cc20_context_t *ctx, const uint8_t nonce[]) {
     CC20_ODD_ROUND (A, B, C, D); \
     CC20_EVEN_ROUND(A, B, C, D)
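
The context above is the tail of the round machinery: CC20_ODD_ROUND runs the
quarter round on the four rows as columns, and CC20_EVEN_ROUND appears to bring
the diagonals into column position with CC20_PERMUTE_ROWS, run the same quarter
round, and permute back. For reference, the scalar quarter round that every
32-bit lane undergoes (a sketch, using rol32 from above):

    // one ChaCha20 quarter round; the SSE macros run four of these at once
    #define QR(a, b, c, d)                \
        a += b; d ^= a; d = rol32(d, 16); \
        c += d; b ^= c; b = rol32(b, 12); \
        a += b; d ^= a; d = rol32(d, 8);  \
        c += d; b ^= c; b = rol32(b, 7)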
 
-static void cc20_block_next(cc20_context_t *ctx) {
-
-    uint32_t *counter = ctx->state + 12;
-    uint32_t cnt;
-
-    __m128i a, b, c, d, k0, k1, k2, k3;
-
-    a = _mm_loadu_si128 ((__m128i*)&(ctx->state[ 0]));
-    b = _mm_loadu_si128 ((__m128i*)&(ctx->state[ 4]));
-    c = _mm_loadu_si128 ((__m128i*)&(ctx->state[ 8]));
-    d = _mm_loadu_si128 ((__m128i*)&(ctx->state[12]));
-
-    k0 = a;
-    k1 = b;
-    k2 = c;
-    k3 = d;
+#define STOREXOR(O,I,X)                                                 \
+    _mm_storeu_si128 ((__m128i*)O,                                      \
+                      _mm_xor_si128 (_mm_loadu_si128((__m128i*)I), X)); \
+    I += 16; O += 16
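
STOREXOR is the workhorse of the new data path: it XORs one 16-byte register
of keystream straight into the data stream with unaligned load and store, so
the buffers need no particular alignment, and it advances both pointers. The
macro body deliberately ends without a semicolon; each call site supplies its
own. One invocation expands to roughly this:

    // STOREXOR(out, in, k0); -- one keystream row applied to 16 data bytes
    _mm_storeu_si128((__m128i*)out,
                     _mm_xor_si128(_mm_loadu_si128((__m128i*)in), k0));
    in += 16; out += 16;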
+
+int cc20_crypt (unsigned char *out, const unsigned char *in, size_t in_len,
+                const unsigned char *iv, cc20_context_t *ctx) {
+
+    __m128i a, b, c, d, k0, k1, k2, k3, k4, k5, k6, k7;
+
+    uint8_t *keystream8 = (uint8_t*)ctx->keystream32;
+
+    const uint8_t *magic_constant = (uint8_t*)"expand 32-byte k";
+
+    a = _mm_loadu_si128 ((__m128i*)magic_constant);
+    b = _mm_loadu_si128 ((__m128i*)(ctx->key));
+    c = _mm_loadu_si128 ((__m128i*)((ctx->key)+16));
+    d = _mm_loadu_si128 ((__m128i*)iv);
+
+    while (in_len >= 128) {
+
+        k0 = a; k1 = b; k2 = c; k3 = d;
+        k4 = a; k5 = b; k6 = c; k7 = ADD(d, ONE);
+
+        // 10 double rounds -- in parallel to make better use of all 8 SSE registers
+        CC20_DOUBLE_ROUND(k0, k1, k2, k3); CC20_DOUBLE_ROUND(k4, k5, k6, k7);
+        CC20_DOUBLE_ROUND(k0, k1, k2, k3); CC20_DOUBLE_ROUND(k4, k5, k6, k7);
+        CC20_DOUBLE_ROUND(k0, k1, k2, k3); CC20_DOUBLE_ROUND(k4, k5, k6, k7);
+        CC20_DOUBLE_ROUND(k0, k1, k2, k3); CC20_DOUBLE_ROUND(k4, k5, k6, k7);
+        CC20_DOUBLE_ROUND(k0, k1, k2, k3); CC20_DOUBLE_ROUND(k4, k5, k6, k7);
+        CC20_DOUBLE_ROUND(k0, k1, k2, k3); CC20_DOUBLE_ROUND(k4, k5, k6, k7);
+        CC20_DOUBLE_ROUND(k0, k1, k2, k3); CC20_DOUBLE_ROUND(k4, k5, k6, k7);
+        CC20_DOUBLE_ROUND(k0, k1, k2, k3); CC20_DOUBLE_ROUND(k4, k5, k6, k7);
+        CC20_DOUBLE_ROUND(k0, k1, k2, k3); CC20_DOUBLE_ROUND(k4, k5, k6, k7);
+        CC20_DOUBLE_ROUND(k0, k1, k2, k3); CC20_DOUBLE_ROUND(k4, k5, k6, k7);
+
+        k0 = ADD(k0, a); k1 = ADD(k1, b); k2 = ADD(k2, c); k3 = ADD(k3, d);
+        k4 = ADD(k4, a); k5 = ADD(k5, b); k6 = ADD(k6, c); k7 = ADD(k7, d); k7 = ADD(k7, ONE);
+
+        STOREXOR(out, in, k0); STOREXOR(out, in, k1); STOREXOR(out, in, k2); STOREXOR(out, in, k3);
+        STOREXOR(out, in, k4); STOREXOR(out, in, k5); STOREXOR(out, in, k6); STOREXOR(out, in, k7);
+
+        // increment counter, make sure it is and stays little endian in memory
+        d = ADD(d, TWO);
+
+        in_len -= 128;
+    }
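
The 128-byte loop keeps two independent block computations in flight: k0..k3
is the block at counter value d, k4..k7 the one at d plus ONE, and pairing the
double rounds interleaves two independent dependency chains, so the CPU always
has work to overlap. After the rounds, the input rows are added back in
(ChaCha20's feed-forward), with k7 getting the extra ONE its block started
with. The 32-bit block counter sits in the lowest lane of row d, and ONE/TWO
touch only that lane, so ADD(d, TWO) steps the counter past the two consumed
blocks while the nonce lanes stay put -- little endian in memory, as the
comment says. The same step as an illustrative helper (name not in the patch):

    #include <emmintrin.h>

    // what ADD(d, ONE) does: bump the 32-bit block counter in lane 0 of row d,
    // leaving the three nonce lanes untouched
    static inline __m128i cc20_next_counter(__m128i d) {
        return _mm_add_epi32(d, _mm_setr_epi32(1, 0, 0, 0));
    }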
|
+
+    if (in_len >= 64) {
+
+        k0 = a; k1 = b; k2 = c; k3 = d;
 
         // 10 double rounds
         CC20_DOUBLE_ROUND(k0, k1, k2, k3);
@@ -164,87 +192,46 @@ static void cc20_block_next(cc20_context_t *ctx) {
         CC20_DOUBLE_ROUND(k0, k1, k2, k3);
         CC20_DOUBLE_ROUND(k0, k1, k2, k3);
 
-    k0 = ADD(k0, a);
-    k1 = ADD(k1, b);
-    k2 = ADD(k2, c);
-    k3 = ADD(k3, d);
-
-    _mm_storeu_si128 ((__m128i*)&(ctx->keystream32[ 0]), k0);
-    _mm_storeu_si128 ((__m128i*)&(ctx->keystream32[ 4]), k1);
-    _mm_storeu_si128 ((__m128i*)&(ctx->keystream32[ 8]), k2);
-    _mm_storeu_si128 ((__m128i*)&(ctx->keystream32[12]), k3);
-
-    // increment counter, make sure it is and stays little endian in memory
-    cnt = le32toh(counter[0]);
-    counter[0] = htole32(++cnt);
-    if(0 == counter[0]) {
-        // wrap around occured, increment higher 32 bits of counter
-        // unlikely with 1,500 byte sized packets
-        cnt = le32toh(counter[1]);
-        counter[1] = htole32(++cnt);
-        if(0 == counter[1]) {
-            // very unlikely
-            cnt = le32toh(counter[2]);
-            counter[2] = htole32(++cnt);
-            if(0 == counter[2]) {
-                // extremely unlikely
-                cnt = le32toh(counter[3]);
-                counter[3] = htole32(++cnt);
-            }
-        }
-    }
-}
-
-static void cc20_init_context(cc20_context_t *ctx, const uint8_t *nonce) {
-
-    cc20_init_block(ctx, nonce);
-}
-
-int cc20_crypt (unsigned char *out, const unsigned char *in, size_t in_len,
-                const unsigned char *iv, cc20_context_t *ctx) {
-
-    uint8_t *keystream8 = (uint8_t*)ctx->keystream32;
-    uint32_t * in_p = (uint32_t*)in;
-    uint32_t * out_p = (uint32_t*)out;
-    size_t tmp_len = in_len;
-
-    cc20_init_context(ctx, iv);
-
-    while(in_len >= 64) {
-
-        cc20_block_next(ctx);
-
-        *(uint32_t*)out_p = *(uint32_t*)in_p ^ ctx->keystream32[ 0]; in_p++; out_p++;
-        *(uint32_t*)out_p = *(uint32_t*)in_p ^ ctx->keystream32[ 1]; in_p++; out_p++;
-        *(uint32_t*)out_p = *(uint32_t*)in_p ^ ctx->keystream32[ 2]; in_p++; out_p++;
-        *(uint32_t*)out_p = *(uint32_t*)in_p ^ ctx->keystream32[ 3]; in_p++; out_p++;
-        *(uint32_t*)out_p = *(uint32_t*)in_p ^ ctx->keystream32[ 4]; in_p++; out_p++;
-        *(uint32_t*)out_p = *(uint32_t*)in_p ^ ctx->keystream32[ 5]; in_p++; out_p++;
-        *(uint32_t*)out_p = *(uint32_t*)in_p ^ ctx->keystream32[ 6]; in_p++; out_p++;
-        *(uint32_t*)out_p = *(uint32_t*)in_p ^ ctx->keystream32[ 7]; in_p++; out_p++;
-        *(uint32_t*)out_p = *(uint32_t*)in_p ^ ctx->keystream32[ 8]; in_p++; out_p++;
-        *(uint32_t*)out_p = *(uint32_t*)in_p ^ ctx->keystream32[ 9]; in_p++; out_p++;
-        *(uint32_t*)out_p = *(uint32_t*)in_p ^ ctx->keystream32[10]; in_p++; out_p++;
-        *(uint32_t*)out_p = *(uint32_t*)in_p ^ ctx->keystream32[11]; in_p++; out_p++;
-        *(uint32_t*)out_p = *(uint32_t*)in_p ^ ctx->keystream32[12]; in_p++; out_p++;
-        *(uint32_t*)out_p = *(uint32_t*)in_p ^ ctx->keystream32[13]; in_p++; out_p++;
-        *(uint32_t*)out_p = *(uint32_t*)in_p ^ ctx->keystream32[14]; in_p++; out_p++;
-        *(uint32_t*)out_p = *(uint32_t*)in_p ^ ctx->keystream32[15]; in_p++; out_p++;
-
-        in_len -= 64;
-    }
-
-    if(in_len > 0) {
-
-        cc20_block_next(ctx);
-
-        tmp_len -= in_len;
-        while(in_len > 0) {
-            out[tmp_len] = in[tmp_len] ^ keystream8[tmp_len%64];
-            tmp_len++;
-            in_len--;
-        }
-    }
-}
+        k0 = ADD(k0, a); k1 = ADD(k1, b); k2 = ADD(k2, c); k3 = ADD(k3, d);
+
+        STOREXOR(out, in, k0); STOREXOR(out, in, k1); STOREXOR(out, in, k2); STOREXOR(out, in, k3);
+
+        // increment counter, make sure it is and stays little endian in memory
+        d = ADD(d, ONE);
+
+        in_len -= 64;
+    }
+
+    if (in_len) {
+
+        k0 = a; k1 = b; k2 = c; k3 = d;
+
+        // 10 double rounds
+        CC20_DOUBLE_ROUND(k0, k1, k2, k3);
+        CC20_DOUBLE_ROUND(k0, k1, k2, k3);
+        CC20_DOUBLE_ROUND(k0, k1, k2, k3);
+        CC20_DOUBLE_ROUND(k0, k1, k2, k3);
+        CC20_DOUBLE_ROUND(k0, k1, k2, k3);
+        CC20_DOUBLE_ROUND(k0, k1, k2, k3);
+        CC20_DOUBLE_ROUND(k0, k1, k2, k3);
+        CC20_DOUBLE_ROUND(k0, k1, k2, k3);
+        CC20_DOUBLE_ROUND(k0, k1, k2, k3);
+        CC20_DOUBLE_ROUND(k0, k1, k2, k3);
+
+        k0 = ADD(k0, a); k1 = ADD(k1, b); k2 = ADD(k2, c); k3 = ADD(k3, d);
+
+        _mm_storeu_si128 ((__m128i*)&(ctx->keystream32[ 0]), k0);
+        _mm_storeu_si128 ((__m128i*)&(ctx->keystream32[ 4]), k1);
+        _mm_storeu_si128 ((__m128i*)&(ctx->keystream32[ 8]), k2);
+        _mm_storeu_si128 ((__m128i*)&(ctx->keystream32[12]), k3);
+
+        // keep in mind that out and in got increased inside the last loop
+        // and point to current position now
+        while(in_len > 0) {
+            in_len--;
+            out[in_len] = in[in_len] ^ keystream8[in_len];
+        }
+    }
+}
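
Tail handling in the new path: a final partial block (if any) gets its
keystream computed in registers like a full block, stored once to
ctx->keystream32, and applied byte-wise -- out and in already point at the
remainder because every STOREXOR advanced them. Counting in_len down applies
the bytes back to front, which is harmless since each byte depends only on its
own keystream byte; a forward version, purely for illustration:

    // same effect as the byte loop above (and leaves in_len untouched)
    for(size_t i = 0; i < in_len; i++)
        out[i] = in[i] ^ keystream8[i];

Taken together, cc20_crypt is now a one-shot primitive in the SSE path: it
derives the whole keystream from (key, iv) on the fly and never touches
ctx->state. A hypothetical call site, sketched under the assumption that
cc20_context_t exposes the key member used above (encryption and decryption
are the same operation):

    #include <string.h>

    static void crypt_packet(unsigned char *pkt, size_t len,
                             const unsigned char key[32],
                             const unsigned char iv[16] /* counter + nonce */) {
        cc20_context_t ctx;
        memcpy(ctx.key, key, 32);               // 32-byte ChaCha20 key
        cc20_crypt(pkt, pkt, len, iv, &ctx);    // in place is fine here
    }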

@@ -286,7 +273,6 @@ static void cc20_init_block(cc20_context_t *ctx, const uint8_t nonce[]) {
 static void cc20_block_next(cc20_context_t *ctx) {
 
     uint32_t *counter = ctx->state + 12;
-    uint32_t c;
 
     ctx->keystream32[ 0] = ctx->state[ 0];
     ctx->keystream32[ 1] = ctx->state[ 1];
|
@@ -335,24 +321,7 @@ static void cc20_block_next(cc20_context_t *ctx) {
     ctx->keystream32[15] += ctx->state[15];
 
     // increment counter, make sure it is and stays little endian in memory
-    c = le32toh(counter[0]);
-    counter[0] = htole32(++c);
-    if(0 == counter[0]) {
-        // wrap around occured, increment higher 32 bits of counter
-        // unlikely with 1,500 byte sized packets
-        c = le32toh(counter[1]);
-        counter[1] = htole32(++c);
-        if(0 == counter[1]) {
-            // very unlikely
-            c = le32toh(counter[2]);
-            counter[2] = htole32(++c);
-            if(0 == counter[2]) {
-                // extremely unlikely
-                c = le32toh(counter[3]);
-                counter[3] = htole32(++c);
-            }
-        }
-    }
+    *counter = htole32(le32toh(*counter)+1);
 }
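
The carry chain collapses to a single line in the scalar path, too: with one
64-byte block per counter value, a packet-sized buffer never gets anywhere
near a 32-bit counter wrap (that would take 256 GiB of keystream), so treating
only state word 12 as the block counter and dropping the wrap-around handling
is safe here. The increment round-trips through host byte order so the word
stays little endian in memory; as a self-contained sketch, assuming a
glibc-style endian.h:

    #include <endian.h>
    #include <stdint.h>

    // bump the low counter word in place, keeping it little endian in memory
    static inline void cc20_bump_counter(uint32_t *counter /* ctx->state + 12 */) {
        *counter = htole32(le32toh(*counter) + 1);
    }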
|
|
|
|
|
|
|
|
|
|
|
|
|
|