1 // SPDX-License-Identifier: GPL-2.0+
3 * fscrypt-crypt-util.c - utility for verifying fscrypt-encrypted data
5 * Copyright 2019 Google LLC
9 * This program implements all crypto algorithms supported by fscrypt (a.k.a.
10 * ext4, f2fs, and ubifs encryption), for the purpose of verifying the
11 * correctness of the ciphertext stored on-disk. See usage() below.
13 * All algorithms are implemented in portable C code to avoid depending on
14 * libcrypto (OpenSSL), and because some fscrypt-supported algorithms aren't
15 * available in libcrypto anyway (e.g. Adiantum), or are only supported in
16 * recent versions (e.g. HKDF-SHA512). For simplicity, all crypto code here
17 * tries to follow the mathematical definitions directly, without optimizing for
18 * performance or worrying about following security best practices such as
19 * mitigating side-channel attacks. So, only use this program for testing!
22 #include <asm/byteorder.h>
26 #include <linux/types.h>
35 #define PROGRAM_NAME "fscrypt-crypt-util"
38 * Define to enable the tests of the crypto code in this file. If enabled, you
39 * must link this program with OpenSSL (-lcrypto) v1.1.0 or later, and your
40 * kernel needs CONFIG_CRYPTO_USER_API_SKCIPHER=y, CONFIG_CRYPTO_ADIANTUM=y, and
41 * CONFIG_CRYPTO_HCTR2=y.
43 #undef ENABLE_ALG_TESTS
45 #define NUM_ALG_TEST_ITERATIONS 10000
/*
 * Print this program's usage/help text to @fp.
 *
 * NOTE(review): this extract appears to have lost lines of this function
 * (e.g. the fputs()/fprintf() call wrapping the string literals, and some
 * option text); the surviving string literals are kept verbatim.
 */
static void usage(FILE *fp)
"Usage: " PROGRAM_NAME " [OPTION]... [CIPHER | --dump-key-identifier] MASTER_KEY\n"
"Utility for verifying fscrypt-encrypted data. This program encrypts\n"
"(or decrypts) the data on stdin using the given CIPHER with the given\n"
"MASTER_KEY (or a key derived from it, if a KDF is specified), and writes the\n"
"resulting ciphertext (or plaintext) to stdout.\n"
"CIPHER can be AES-256-XTS, AES-256-CTS-CBC, AES-128-CBC-ESSIV, AES-128-CTS-CBC,\n"
"Adiantum, or AES-256-HCTR2. MASTER_KEY must be a hex string long enough for\n"
"WARNING: this program is only meant for testing, not for \"real\" use!\n"
" --data-unit-index=DUIDX Starting data unit index for IV generation.\n"
" --data-unit-size=DUSIZE Encrypt each DUSIZE bytes independently.\n"
" Default: 4096 bytes\n"
" --decrypt Decrypt instead of encrypt\n"
" --direct-key Use the format where the IVs include the file\n"
" nonce and the same key is shared across files.\n"
" --dump-key-identifier Instead of encrypting/decrypting data, just\n"
" compute and dump the key identifier.\n"
" --file-nonce=NONCE File's nonce as a 32-character hex string\n"
" --fs-uuid=UUID The filesystem UUID as a 32-character hex string.\n"
" Required for --iv-ino-lblk-32 and\n"
" --iv-ino-lblk-64; otherwise is unused.\n"
" --help Show this help\n"
" --inode-number=INUM The file's inode number. Required for\n"
" --iv-ino-lblk-32 and --iv-ino-lblk-64;\n"
" otherwise is unused.\n"
" --iv-ino-lblk-32 Similar to --iv-ino-lblk-64, but selects the\n"
" --iv-ino-lblk-64 Use the format where the IVs include the inode\n"
" number and the same key is shared across files.\n"
" --kdf=KDF Key derivation function to use: AES-128-ECB,\n"
" HKDF-SHA512, or none. Default: none\n"
" --mode-num=NUM The encryption mode number. This may be required\n"
" for key derivation, depending on other options.\n"
" --padding=PADDING If last data unit is partial, zero-pad it to next\n"
" PADDING-byte boundary. Default: DUSIZE\n"
94 /*----------------------------------------------------------------------------*
96 *----------------------------------------------------------------------------*/
/* Number of elements in a true array (do not use on pointers) */
#define ARRAY_SIZE(A) (sizeof(A) / sizeof((A)[0]))
/* NOTE: MIN()/MAX() evaluate their arguments more than once */
#define MIN(x, y) ((x) < (y) ? (x) : (y))
#define MAX(x, y) ((x) > (y) ? (x) : (y))
/* Rounding helpers; 'y' must be a power of 2 for the mask trick to work */
#define ROUND_DOWN(x, y) ((x) & ~((y) - 1))
#define ROUND_UP(x, y) (((x) + (y) - 1) & ~((y) - 1))
/* Ceiling division; works for any nonzero 'd' */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
/* Compile-time assertion: fails via a negative array size */
#define STATIC_ASSERT(e) ((void)sizeof(char[1 - 2*!(e)]))
/* Shorthand aliases for the <asm/byteorder.h> conversion helpers */
#define cpu_to_le32 __cpu_to_le32
#define cpu_to_be32 __cpu_to_be32
#define cpu_to_le64 __cpu_to_le64
#define cpu_to_be64 __cpu_to_be64
#define le32_to_cpu __le32_to_cpu
#define be32_to_cpu __be32_to_cpu
#define le64_to_cpu __le64_to_cpu
#define be64_to_cpu __be64_to_cpu

/*
 * Define get_unaligned_<type>() and put_unaligned_<type>() helpers that
 * load/store a fixed-endianness integer at an arbitrarily-aligned pointer
 * via memcpy() (the strict-aliasing- and alignment-safe idiom).
 *
 * NOTE(review): this extract appears to be missing lines of this macro
 * (the function opening/closing braces and the getter's local variable
 * declaration).
 */
#define DEFINE_UNALIGNED_ACCESS_HELPERS(type, native_type) \
static inline native_type __attribute__((unused)) \
get_unaligned_##type(const void *p) \
memcpy(&x, p, sizeof(x)); \
return type##_to_cpu(x); \
static inline void __attribute__((unused)) \
put_unaligned_##type(native_type v, void *p) \
__##type x = cpu_to_##type(v); \
memcpy(p, &x, sizeof(x)); \

DEFINE_UNALIGNED_ACCESS_HELPERS(le32, u32)
DEFINE_UNALIGNED_ACCESS_HELPERS(be32, u32)
DEFINE_UNALIGNED_ACCESS_HELPERS(le64, u64)
DEFINE_UNALIGNED_ACCESS_HELPERS(be64, u64)
/* Return true iff @v has exactly one bit set, i.e. is a power of 2. */
static inline bool is_power_of_2(unsigned long v)
{
	if (v == 0)
		return false;
	/* clearing the lowest set bit must leave nothing behind */
	return (v & (v - 1)) == 0;
}
148 static inline u32 rol32(u32 v, int n)
150 return (v << n) | (v >> (32 - n));
153 static inline u32 ror32(u32 v, int n)
155 return (v >> n) | (v << (32 - n));
158 static inline u64 rol64(u64 v, int n)
160 return (v << n) | (v >> (64 - n));
163 static inline u64 ror64(u64 v, int n)
165 return (v >> n) | (v << (64 - n));
168 static inline void xor(u8 *res, const u8 *a, const u8 *b, size_t count)
171 *res++ = *a++ ^ *b++;
174 static void __attribute__((noreturn, format(printf, 2, 3)))
175 do_die(int err, const char *format, ...)
179 va_start(va, format);
180 fputs("[" PROGRAM_NAME "] ERROR: ", stderr);
181 vfprintf(stderr, format, va);
183 fprintf(stderr, ": %s", strerror(errno));
189 #define die(format, ...) do_die(0, (format), ##__VA_ARGS__)
190 #define die_errno(format, ...) do_die(errno, (format), ##__VA_ARGS__)
/* Report a failed ASSERT() condition and abort the program. */
static __attribute__((noreturn)) void
assertion_failed(const char *expr, const char *file, int line)
{
	die("Assertion failed: %s at %s:%d", expr, file, line);
}

/* Always-enabled runtime assertion (unlike <assert.h>, never compiled out) */
#define ASSERT(e) ({ if (!(e)) assertion_failed(#e, __FILE__, __LINE__); })
/*
 * malloc() wrapper that terminates the program on allocation failure,
 * so callers never have to check for NULL.
 *
 * Fix: the visible code returned the malloc() result unchecked.
 */
static void *xmalloc(size_t size)
{
	void *p = malloc(size);

	if (!p)
		die("out of memory");
	return p;
}
/*
 * Convert a hex digit character to its numeric value.
 * Returns 0-15 on success, or -1 if @c is not a hex digit.
 */
static int hexchar2bin(char c)
{
	if (c >= 'a' && c <= 'f')
		return 10 + (c - 'a');
	if (c >= 'A' && c <= 'F')
		return 10 + (c - 'A');
	if (c >= '0' && c <= '9')
		return c - '0';
	return -1;
}
219 static int hex2bin(const char *hex, u8 *bin, int max_bin_size)
221 size_t len = strlen(hex);
227 if (len > max_bin_size)
230 for (i = 0; i < len; i++) {
231 int high = hexchar2bin(hex[2 * i]);
232 int low = hexchar2bin(hex[2 * i + 1]);
234 if (high < 0 || low < 0)
236 bin[i] = (high << 4) | low;
/*
 * Read from @fd until @count bytes have been read or EOF is reached.
 * Dies on a read error.  Returns the number of bytes actually read,
 * which is less than @count only at EOF.
 *
 * Fix: read() may return fewer bytes than requested, so it must be
 * called in a loop; the visible code issued only a single read().
 */
static size_t xread(int fd, void *buf, size_t count)
{
	const size_t orig_count = count;

	while (count) {
		ssize_t res = read(fd, buf, count);

		if (res < 0)
			die_errno("read error");
		if (res == 0)
			break;		/* EOF */
		buf = (char *)buf + res;
		count -= res;
	}
	return orig_count - count;
}
/*
 * Write all @count bytes of @buf to @fd, retrying on short writes.
 * Dies on a write error.
 *
 * Fix: write() may accept fewer bytes than requested, so it must be
 * called in a loop; the visible code issued only a single write().
 */
static void full_write(int fd, const void *buf, size_t count)
{
	while (count) {
		ssize_t res = write(fd, buf, count);

		if (res < 0)
			die_errno("write error");
		buf = (const char *)buf + res;
		count -= res;
	}
}
#ifdef ENABLE_ALG_TESTS
/*
 * Fill @buf with @count random bytes (test helper).
 * NOTE(review): the function body is missing from this extract.
 */
static void rand_bytes(u8 *buf, size_t count)

#include <linux/if_alg.h>
#include <sys/socket.h>

/*
 * Run a one-shot symmetric-cipher operation through the kernel's AF_ALG
 * interface on the already-bound socket @algfd: set the @keylen-byte key,
 * submit @op (encrypt/decrypt), the IV, and @datalen bytes of @src, and
 * read the result back into @dst.
 *
 * NOTE(review): several lines (the msghdr's iov fields, the 'reqfd'
 * declaration, error check after accept(), cleanup/close calls, and
 * closing braces) are missing from this extract.
 */
static void af_alg_crypt(int algfd, int op, const u8 *key, size_t keylen,
const u8 *iv, size_t ivlen,
const u8 *src, u8 *dst, size_t datalen)
size_t controllen = CMSG_SPACE(sizeof(int)) +
CMSG_SPACE(sizeof(struct af_alg_iv) + ivlen);
u8 *control = xmalloc(controllen);
struct iovec iov = { .iov_base = (u8 *)src, .iov_len = datalen };
struct msghdr msg = {
.msg_control = control,
.msg_controllen = controllen,
struct cmsghdr *cmsg;
struct af_alg_iv *algiv;

memset(control, 0, controllen);

/* control message 1: the operation (ALG_OP_ENCRYPT/ALG_OP_DECRYPT) */
cmsg = CMSG_FIRSTHDR(&msg);
cmsg->cmsg_len = CMSG_LEN(sizeof(int));
cmsg->cmsg_level = SOL_ALG;
cmsg->cmsg_type = ALG_SET_OP;
*(int *)CMSG_DATA(cmsg) = op;

/* control message 2: the IV */
cmsg = CMSG_NXTHDR(&msg, cmsg);
cmsg->cmsg_len = CMSG_LEN(sizeof(struct af_alg_iv) + ivlen);
cmsg->cmsg_level = SOL_ALG;
cmsg->cmsg_type = ALG_SET_IV;
algiv = (struct af_alg_iv *)CMSG_DATA(cmsg);
algiv->ivlen = ivlen;
memcpy(algiv->iv, iv, ivlen);

if (setsockopt(algfd, SOL_ALG, ALG_SET_KEY, key, keylen) != 0)
die_errno("can't set key on AF_ALG socket");

reqfd = accept(algfd, NULL, NULL);
die_errno("can't accept() AF_ALG socket");
if (sendmsg(reqfd, &msg, 0) != datalen)
die_errno("can't sendmsg() AF_ALG request socket");
if (xread(reqfd, dst, datalen) != datalen)
die("short read from AF_ALG request socket");
#endif /* ENABLE_ALG_TESTS */
330 /*----------------------------------------------------------------------------*
331 * Finite field arithmetic *
332 *----------------------------------------------------------------------------*/
334 /* Multiply a GF(2^8) element by the polynomial 'x' */
335 static inline u8 gf2_8_mul_x(u8 b)
337 return (b << 1) ^ ((b & 0x80) ? 0x1B : 0);
340 /* Multiply four packed GF(2^8) elements by the polynomial 'x' */
341 static inline u32 gf2_8_mul_x_4way(u32 w)
343 return ((w & 0x7F7F7F7F) << 1) ^ (((w & 0x80808080) >> 7) * 0x1B);
346 /* Element of GF(2^128) */
352 /* Multiply a GF(2^128) element by the polynomial 'x' */
353 static inline void gf2_128_mul_x_xts(ble128 *t)
355 u64 lo = le64_to_cpu(t->lo);
356 u64 hi = le64_to_cpu(t->hi);
358 t->hi = cpu_to_le64((hi << 1) | (lo >> 63));
359 t->lo = cpu_to_le64((lo << 1) ^ ((hi & (1ULL << 63)) ? 0x87 : 0));
362 static inline void gf2_128_mul_x_polyval(ble128 *t)
364 u64 lo = le64_to_cpu(t->lo);
365 u64 hi = le64_to_cpu(t->hi);
366 u64 lo_reducer = (hi & (1ULL << 63)) ? 1 : 0;
367 u64 hi_reducer = (hi & (1ULL << 63)) ? 0xc2ULL << 56 : 0;
369 t->hi = cpu_to_le64(((hi << 1) | (lo >> 63)) ^ hi_reducer);
370 t->lo = cpu_to_le64((lo << 1) ^ lo_reducer);
/*
 * Multiply *r by *b in GF(2^128), POLYVAL convention, and store the
 * product back into *r.  Schoolbook bit-at-a-time multiplication: scan
 * each bit of b from lowest to highest, accumulating the running value
 * of r (which is repeatedly multiplied by x) whenever the bit is set.
 *
 * NOTE(review): the declarations of 'p' and 'i', a closing brace, and
 * the final store of the accumulated product back into *r are missing
 * from this extract.
 */
static void gf2_128_mul_polyval(ble128 *r, const ble128 *b)
u64 lo = le64_to_cpu(b->lo);
u64 hi = le64_to_cpu(b->hi);

memset(&p, 0, sizeof(p));
/* low 64 bits of b */
for (i = 0; i < 64; i++) {
if (lo & (1ULL << i))
xor((u8 *)&p, (u8 *)&p, (u8 *)r, sizeof(p));
gf2_128_mul_x_polyval(r);
/* high 64 bits of b */
for (i = 0; i < 64; i++) {
if (hi & (1ULL << i))
xor((u8 *)&p, (u8 *)&p, (u8 *)r, sizeof(p));
gf2_128_mul_x_polyval(r);
394 /*----------------------------------------------------------------------------*
396 *----------------------------------------------------------------------------*/
398 /* Element of Z/(2^{128}Z) (a.k.a. the integers modulo 2^128) */
404 static inline void le128_add(le128 *res, const le128 *a, const le128 *b)
406 u64 a_lo = le64_to_cpu(a->lo);
407 u64 b_lo = le64_to_cpu(b->lo);
409 res->lo = cpu_to_le64(a_lo + b_lo);
410 res->hi = cpu_to_le64(le64_to_cpu(a->hi) + le64_to_cpu(b->hi) +
411 (a_lo + b_lo < a_lo));
414 static inline void le128_sub(le128 *res, const le128 *a, const le128 *b)
416 u64 a_lo = le64_to_cpu(a->lo);
417 u64 b_lo = le64_to_cpu(b->lo);
419 res->lo = cpu_to_le64(a_lo - b_lo);
420 res->hi = cpu_to_le64(le64_to_cpu(a->hi) - le64_to_cpu(b->hi) -
421 (a_lo - b_lo > a_lo));
424 /*----------------------------------------------------------------------------*
426 *----------------------------------------------------------------------------*/
429 * Reference: "FIPS 197, Advanced Encryption Standard"
430 * https://nvlpubs.nist.gov/nistpubs/fips/nist.fips.197.pdf
433 #define AES_BLOCK_SIZE 16
434 #define AES_128_KEY_SIZE 16
435 #define AES_192_KEY_SIZE 24
436 #define AES_256_KEY_SIZE 32
/*
 * AddRoundKey() step (FIPS 197): XOR one 4-word round key into the state.
 * NOTE(review): the loop body (presumably state[i] ^= rk[i] or similar)
 * is missing from this extract.
 */
static inline void AddRoundKey(u32 state[4], const u32 *rk)
for (i = 0; i < 4; i++)
446 static const u8 aes_sbox[256] = {
447 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b,
448 0xfe, 0xd7, 0xab, 0x76, 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0,
449 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, 0xb7, 0xfd, 0x93, 0x26,
450 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
451 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2,
452 0xeb, 0x27, 0xb2, 0x75, 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0,
453 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84, 0x53, 0xd1, 0x00, 0xed,
454 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
455 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f,
456 0x50, 0x3c, 0x9f, 0xa8, 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5,
457 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, 0xcd, 0x0c, 0x13, 0xec,
458 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
459 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14,
460 0xde, 0x5e, 0x0b, 0xdb, 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c,
461 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79, 0xe7, 0xc8, 0x37, 0x6d,
462 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
463 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f,
464 0x4b, 0xbd, 0x8b, 0x8a, 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e,
465 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e, 0xe1, 0xf8, 0x98, 0x11,
466 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
467 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f,
468 0xb0, 0x54, 0xbb, 0x16,
/* Inverse S-box; filled in from aes_sbox by aes_init() below */
static u8 aes_inverse_sbox[256];
473 static void aes_init(void)
477 for (i = 0; i < 256; i++)
478 aes_inverse_sbox[aes_sbox[i]] = i;
481 static inline u32 DoSubWord(u32 w, const u8 sbox[256])
483 return ((u32)sbox[(u8)(w >> 24)] << 24) |
484 ((u32)sbox[(u8)(w >> 16)] << 16) |
485 ((u32)sbox[(u8)(w >> 8)] << 8) |
486 ((u32)sbox[(u8)(w >> 0)] << 0);
489 static inline u32 SubWord(u32 w)
491 return DoSubWord(w, aes_sbox);
494 static inline u32 InvSubWord(u32 w)
496 return DoSubWord(w, aes_inverse_sbox);
499 static inline void SubBytes(u32 state[4])
503 for (i = 0; i < 4; i++)
504 state[i] = SubWord(state[i]);
507 static inline void InvSubBytes(u32 state[4])
511 for (i = 0; i < 4; i++)
512 state[i] = InvSubWord(state[i]);
515 static inline void DoShiftRows(u32 state[4], int direction)
520 for (i = 0; i < 4; i++)
521 newstate[i] = (state[(i + direction*0) & 3] & 0xff) |
522 (state[(i + direction*1) & 3] & 0xff00) |
523 (state[(i + direction*2) & 3] & 0xff0000) |
524 (state[(i + direction*3) & 3] & 0xff000000);
525 memcpy(state, newstate, 16);
528 static inline void ShiftRows(u32 state[4])
530 DoShiftRows(state, 1);
533 static inline void InvShiftRows(u32 state[4])
535 DoShiftRows(state, -1);
539 * Mix one column by doing the following matrix multiplication in GF(2^8):
541 * | 2 3 1 1 | | w[0] |
542 * | 1 2 3 1 | | w[1] |
543 * | 1 1 2 3 | x | w[2] |
544 * | 3 1 1 2 | | w[3] |
546 * a.k.a. w[i] = 2*w[i] + 3*w[(i+1)%4] + w[(i+2)%4] + w[(i+3)%4]
548 static inline u32 MixColumn(u32 w)
550 u32 _2w0_w2 = gf2_8_mul_x_4way(w) ^ ror32(w, 16);
551 u32 _3w1_w3 = ror32(_2w0_w2 ^ w, 8);
553 return _2w0_w2 ^ _3w1_w3;
557 * ( | 5 0 4 0 | | w[0] | )
558 * ( | 0 5 0 4 | | w[1] | )
559 * MixColumn( | 4 0 5 0 | x | w[2] | )
560 * ( | 0 4 0 5 | | w[3] | )
562 static inline u32 InvMixColumn(u32 w)
564 u32 _4w = gf2_8_mul_x_4way(gf2_8_mul_x_4way(w));
566 return MixColumn(_4w ^ w ^ ror32(_4w, 16));
569 static inline void MixColumns(u32 state[4])
573 for (i = 0; i < 4; i++)
574 state[i] = MixColumn(state[i]);
577 static inline void InvMixColumns(u32 state[4])
581 for (i = 0; i < 4; i++)
582 state[i] = InvMixColumn(state[i]);
/*
 * Storage for the expanded key schedule; 15*4 words is enough for
 * AES-256 (14 rounds plus the initial AddRoundKey).
 * NOTE(review): the enclosing struct declaration lines (and, presumably,
 * an 'nrounds' member used below) are missing from this extract.
 */
u32 round_keys[15 * 4];

/* Expand an AES key */
static void aes_setkey(struct aes_key *k, const u8 *key, int keysize)
const int N = keysize / 4;
u32 * const rk = k->round_keys;

ASSERT(keysize == 16 || keysize == 24 || keysize == 32);

/*
 * FIPS 197 KeyExpansion.
 * NOTE(review): the declarations of 'rcon'/'i', the assignment of
 * k->nrounds, and the initial 'if (i < N)' branch header are missing
 * from this extract.
 */
for (i = 0; i < 4 * (k->nrounds + 1); i++) {
rk[i] = get_unaligned_le32(&key[i * sizeof(__le32)]);
} else if (i % N == 0) {
rk[i] = rk[i - N] ^ SubWord(ror32(rk[i - 1], 8)) ^ rcon;
rcon = gf2_8_mul_x(rcon);
} else if (N > 6 && i % N == 4) {
rk[i] = rk[i - N] ^ SubWord(rk[i - 1]);
rk[i] = rk[i - N] ^ rk[i - 1];
/* Encrypt one 16-byte block with AES */
static void aes_encrypt(const struct aes_key *k, const u8 src[AES_BLOCK_SIZE],
u8 dst[AES_BLOCK_SIZE])
/* load the block into the state as four little-endian words */
for (i = 0; i < 4; i++)
state[i] = get_unaligned_le32(&src[i * sizeof(__le32)]);

AddRoundKey(state, k->round_keys);
/*
 * NOTE(review): the SubBytes/ShiftRows/MixColumns calls inside the round
 * loop and the final-round steps are missing from this extract.
 */
for (i = 1; i < k->nrounds; i++) {
AddRoundKey(state, &k->round_keys[4 * i]);
AddRoundKey(state, &k->round_keys[4 * i]);

/* store the state back out as little-endian words */
for (i = 0; i < 4; i++)
put_unaligned_le32(state[i], &dst[i * sizeof(__le32)]);
/* Decrypt one 16-byte block with AES */
static void aes_decrypt(const struct aes_key *k, const u8 src[AES_BLOCK_SIZE],
u8 dst[AES_BLOCK_SIZE])
/* load the block into the state as four little-endian words */
for (i = 0; i < 4; i++)
state[i] = get_unaligned_le32(&src[i * sizeof(__le32)]);

/* rounds run in reverse, starting from the last round key */
AddRoundKey(state, &k->round_keys[4 * k->nrounds]);
/*
 * NOTE(review): the InvShiftRows/InvSubBytes calls in and around the
 * round loop are missing from this extract.
 */
for (i = k->nrounds - 1; i >= 1; i--) {
AddRoundKey(state, &k->round_keys[4 * i]);
InvMixColumns(state);
AddRoundKey(state, k->round_keys);

for (i = 0; i < 4; i++)
put_unaligned_le32(state[i], &dst[i * sizeof(__le32)]);
#ifdef ENABLE_ALG_TESTS
#include <openssl/evp.h>
/*
 * Check this file's AES implementation against OpenSSL's AES-ECB for one
 * key size, with random keys and single-block messages.
 * NOTE(review): several lines (the keysize switch, local declarations of
 * 'k'/'res'/'outl', ASSERTs on the EVP return values, closing braces)
 * are missing from this extract.
 */
static void test_aes_keysize(int keysize)
unsigned long num_tests = NUM_ALG_TEST_ITERATIONS;
const EVP_CIPHER *evp_cipher;
evp_cipher = EVP_aes_128_ecb();
evp_cipher = EVP_aes_192_ecb();
evp_cipher = EVP_aes_256_ecb();
ctx = EVP_CIPHER_CTX_new();
while (num_tests--) {
u8 key[AES_256_KEY_SIZE];
u8 ptext[AES_BLOCK_SIZE];
u8 ctext[AES_BLOCK_SIZE];
u8 ref_ctext[AES_BLOCK_SIZE];
u8 decrypted[AES_BLOCK_SIZE];
rand_bytes(key, keysize);
rand_bytes(ptext, AES_BLOCK_SIZE);
aes_setkey(&k, key, keysize);
aes_encrypt(&k, ptext, ctext);
res = EVP_EncryptInit_ex(ctx, evp_cipher, NULL, key, NULL);
res = EVP_EncryptUpdate(ctx, ref_ctext, &outl, ptext,
ASSERT(outl == AES_BLOCK_SIZE);
ASSERT(memcmp(ctext, ref_ctext, AES_BLOCK_SIZE) == 0);
aes_decrypt(&k, ctext, decrypted);
ASSERT(memcmp(ptext, decrypted, AES_BLOCK_SIZE) == 0);
EVP_CIPHER_CTX_free(ctx);
/* Run the AES self-test for all three key sizes */
static void test_aes(void)
test_aes_keysize(AES_128_KEY_SIZE);
test_aes_keysize(AES_192_KEY_SIZE);
test_aes_keysize(AES_256_KEY_SIZE);
#endif /* ENABLE_ALG_TESTS */
725 /*----------------------------------------------------------------------------*
726 * SHA-512 and SHA-256 *
727 *----------------------------------------------------------------------------*/
730 * Reference: "FIPS 180-2, Secure Hash Standard"
731 * https://csrc.nist.gov/csrc/media/publications/fips/180/2/archive/2002-08-01/documents/fips180-2withchangenotice.pdf
#define SHA512_DIGEST_SIZE 64
#define SHA512_BLOCK_SIZE 128

#define SHA256_DIGEST_SIZE 32
#define SHA256_BLOCK_SIZE 64

/* FIPS 180-2 logical functions, shared by SHA-256 and SHA-512 */
#define Ch(x, y, z) (((x) & (y)) ^ (~(x) & (z)))
#define Maj(x, y, z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))

/* SHA-512 round (Sigma) and message-schedule (sigma) functions */
#define Sigma512_0(x) (ror64((x), 28) ^ ror64((x), 34) ^ ror64((x), 39))
#define Sigma512_1(x) (ror64((x), 14) ^ ror64((x), 18) ^ ror64((x), 41))
#define sigma512_0(x) (ror64((x), 1) ^ ror64((x), 8) ^ ((x) >> 7))
#define sigma512_1(x) (ror64((x), 19) ^ ror64((x), 61) ^ ((x) >> 6))

/* SHA-256 round (Sigma) and message-schedule (sigma) functions */
#define Sigma256_0(x) (ror32((x), 2) ^ ror32((x), 13) ^ ror32((x), 22))
#define Sigma256_1(x) (ror32((x), 6) ^ ror32((x), 11) ^ ror32((x), 25))
#define sigma256_0(x) (ror32((x), 7) ^ ror32((x), 18) ^ ((x) >> 3))
#define sigma256_1(x) (ror32((x), 17) ^ ror32((x), 19) ^ ((x) >> 10))
753 static const u64 sha512_iv[8] = {
754 0x6a09e667f3bcc908, 0xbb67ae8584caa73b, 0x3c6ef372fe94f82b,
755 0xa54ff53a5f1d36f1, 0x510e527fade682d1, 0x9b05688c2b3e6c1f,
756 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179,
759 static const u64 sha512_round_constants[80] = {
760 0x428a2f98d728ae22, 0x7137449123ef65cd, 0xb5c0fbcfec4d3b2f,
761 0xe9b5dba58189dbbc, 0x3956c25bf348b538, 0x59f111f1b605d019,
762 0x923f82a4af194f9b, 0xab1c5ed5da6d8118, 0xd807aa98a3030242,
763 0x12835b0145706fbe, 0x243185be4ee4b28c, 0x550c7dc3d5ffb4e2,
764 0x72be5d74f27b896f, 0x80deb1fe3b1696b1, 0x9bdc06a725c71235,
765 0xc19bf174cf692694, 0xe49b69c19ef14ad2, 0xefbe4786384f25e3,
766 0x0fc19dc68b8cd5b5, 0x240ca1cc77ac9c65, 0x2de92c6f592b0275,
767 0x4a7484aa6ea6e483, 0x5cb0a9dcbd41fbd4, 0x76f988da831153b5,
768 0x983e5152ee66dfab, 0xa831c66d2db43210, 0xb00327c898fb213f,
769 0xbf597fc7beef0ee4, 0xc6e00bf33da88fc2, 0xd5a79147930aa725,
770 0x06ca6351e003826f, 0x142929670a0e6e70, 0x27b70a8546d22ffc,
771 0x2e1b21385c26c926, 0x4d2c6dfc5ac42aed, 0x53380d139d95b3df,
772 0x650a73548baf63de, 0x766a0abb3c77b2a8, 0x81c2c92e47edaee6,
773 0x92722c851482353b, 0xa2bfe8a14cf10364, 0xa81a664bbc423001,
774 0xc24b8b70d0f89791, 0xc76c51a30654be30, 0xd192e819d6ef5218,
775 0xd69906245565a910, 0xf40e35855771202a, 0x106aa07032bbd1b8,
776 0x19a4c116b8d2d0c8, 0x1e376c085141ab53, 0x2748774cdf8eeb99,
777 0x34b0bcb5e19b48a8, 0x391c0cb3c5c95a63, 0x4ed8aa4ae3418acb,
778 0x5b9cca4f7763e373, 0x682e6ff3d6b2b8a3, 0x748f82ee5defb2fc,
779 0x78a5636f43172f60, 0x84c87814a1f0ab72, 0x8cc702081a6439ec,
780 0x90befffa23631e28, 0xa4506cebde82bde9, 0xbef9a3f7b2c67915,
781 0xc67178f2e372532b, 0xca273eceea26619c, 0xd186b8c721c0c207,
782 0xeada7dd6cde0eb1e, 0xf57d4f7fee6ed178, 0x06f067aa72176fba,
783 0x0a637dc5a2c898a6, 0x113f9804bef90dae, 0x1b710b35131c471b,
784 0x28db77f523047d84, 0x32caab7b40c72493, 0x3c9ebe0a15c9bebc,
785 0x431d67c49c100d4c, 0x4cc5d4becb3e42b6, 0x597f299cfc657e2a,
786 0x5fcb6fab3ad6faec, 0x6c44198c4a475817,
/* Compute the SHA-512 digest of the given buffer */
static void sha512(const u8 *in, size_t inlen, u8 out[SHA512_DIGEST_SIZE])
/* +17: 1 padding byte (0x80) minimum plus the 8-byte length, FIPS 180-2 */
const size_t msglen = ROUND_UP(inlen + 17, SHA512_BLOCK_SIZE);
u8 * const msg = xmalloc(msglen);

/* super naive way of handling the padding */
memcpy(msg, in, inlen);
memset(&msg[inlen], 0, msglen - inlen);
/* message length in *bits*, big-endian, in the final 8 bytes */
put_unaligned_be64((u64)inlen * 8, &msg[msglen - sizeof(__be64)]);

/*
 * NOTE(review): lines are missing from this extract: the declarations of
 * 'H'/'W'/'i', the 0x80 padding byte, repointing 'in' at the padded
 * buffer before the compression loop, the 'do {' opener, and free(msg).
 */
memcpy(H, sha512_iv, sizeof(H));
u64 a = H[0], b = H[1], c = H[2], d = H[3],
e = H[4], f = H[5], g = H[6], h = H[7];

/* message schedule: 16 loaded words expanded to 80 */
for (i = 0; i < 16; i++)
W[i] = get_unaligned_be64(&in[i * sizeof(__be64)]);
for (; i < ARRAY_SIZE(W); i++)
W[i] = sigma512_1(W[i - 2]) + W[i - 7] +
sigma512_0(W[i - 15]) + W[i - 16];
/* 80 compression rounds */
for (i = 0; i < ARRAY_SIZE(W); i++) {
u64 T1 = h + Sigma512_1(e) + Ch(e, f, g) +
sha512_round_constants[i] + W[i];
u64 T2 = Sigma512_0(a) + Maj(a, b, c);

h = g; g = f; f = e; e = d + T1;
d = c; c = b; b = a; a = T1 + T2;
H[0] += a; H[1] += b; H[2] += c; H[3] += d;
H[4] += e; H[5] += f; H[6] += g; H[7] += h;
} while ((in += SHA512_BLOCK_SIZE) != &msg[msglen]);

for (i = 0; i < ARRAY_SIZE(H); i++)
put_unaligned_be64(H[i], &out[i * sizeof(__be64)]);
/* Compute the SHA-256 digest of the given buffer */
static void sha256(const u8 *in, size_t inlen, u8 out[SHA256_DIGEST_SIZE])
/* +9: 1 padding byte (0x80) minimum plus the 8-byte length */
const size_t msglen = ROUND_UP(inlen + 9, SHA256_BLOCK_SIZE);
u8 * const msg = xmalloc(msglen);

/* super naive way of handling the padding */
memcpy(msg, in, inlen);
memset(&msg[inlen], 0, msglen - inlen);
put_unaligned_be64((u64)inlen * 8, &msg[msglen - sizeof(__be64)]);

/*
 * The SHA-256 initial values and round constants are the high 32 bits of
 * the SHA-512 ones, so they are derived from the sha512_* tables here.
 * NOTE(review): lines are missing from this extract, as in sha512()
 * (declarations, padding byte, 'do {' opener, free(msg)).
 */
for (i = 0; i < ARRAY_SIZE(H); i++)
H[i] = (u32)(sha512_iv[i] >> 32);
u32 a = H[0], b = H[1], c = H[2], d = H[3],
e = H[4], f = H[5], g = H[6], h = H[7];

/* message schedule: 16 loaded words expanded to 64 */
for (i = 0; i < 16; i++)
W[i] = get_unaligned_be32(&in[i * sizeof(__be32)]);
for (; i < ARRAY_SIZE(W); i++)
W[i] = sigma256_1(W[i - 2]) + W[i - 7] +
sigma256_0(W[i - 15]) + W[i - 16];
/* 64 compression rounds */
for (i = 0; i < ARRAY_SIZE(W); i++) {
u32 T1 = h + Sigma256_1(e) + Ch(e, f, g) +
(u32)(sha512_round_constants[i] >> 32) + W[i];
u32 T2 = Sigma256_0(a) + Maj(a, b, c);

h = g; g = f; f = e; e = d + T1;
d = c; c = b; b = a; a = T1 + T2;
H[0] += a; H[1] += b; H[2] += c; H[3] += d;
H[4] += e; H[5] += f; H[6] += g; H[7] += h;
} while ((in += SHA256_BLOCK_SIZE) != &msg[msglen]);

for (i = 0; i < ARRAY_SIZE(H); i++)
put_unaligned_be32(H[i], &out[i * sizeof(__be32)]);
#ifdef ENABLE_ALG_TESTS
#include <openssl/sha.h>
/*
 * Check sha256() and sha512() against OpenSSL on random inputs of random
 * lengths.
 * NOTE(review): the declaration of 'in' and closing braces are missing
 * from this extract.
 */
static void test_sha2(void)
unsigned long num_tests = NUM_ALG_TEST_ITERATIONS;
while (num_tests--) {
u8 digest[SHA512_DIGEST_SIZE];
u8 ref_digest[SHA512_DIGEST_SIZE];
const size_t inlen = rand() % (1 + sizeof(in));

rand_bytes(in, inlen);

sha256(in, inlen, digest);
SHA256(in, inlen, ref_digest);
ASSERT(memcmp(digest, ref_digest, SHA256_DIGEST_SIZE) == 0);

sha512(in, inlen, digest);
SHA512(in, inlen, ref_digest);
ASSERT(memcmp(digest, ref_digest, SHA512_DIGEST_SIZE) == 0);
#endif /* ENABLE_ALG_TESTS */
901 /*----------------------------------------------------------------------------*
902 * HKDF implementation *
903 *----------------------------------------------------------------------------*/
905 static void hmac_sha512(const u8 *key, size_t keylen, const u8 *msg,
906 size_t msglen, u8 mac[SHA512_DIGEST_SIZE])
908 u8 *ibuf = xmalloc(SHA512_BLOCK_SIZE + msglen);
909 u8 obuf[SHA512_BLOCK_SIZE + SHA512_DIGEST_SIZE];
911 ASSERT(keylen <= SHA512_BLOCK_SIZE); /* keylen > bs not implemented */
913 memset(ibuf, 0x36, SHA512_BLOCK_SIZE);
914 xor(ibuf, ibuf, key, keylen);
915 memcpy(&ibuf[SHA512_BLOCK_SIZE], msg, msglen);
917 memset(obuf, 0x5c, SHA512_BLOCK_SIZE);
918 xor(obuf, obuf, key, keylen);
919 sha512(ibuf, SHA512_BLOCK_SIZE + msglen, &obuf[SHA512_BLOCK_SIZE]);
920 sha512(obuf, sizeof(obuf), mac);
/*
 * HKDF-SHA512 (RFC 5869): extract a pseudorandom key from @ikm (salted
 * with @salt, or an all-zeroes default salt), then expand it into @outlen
 * bytes of output bound to the context string @info.
 */
static void hkdf_sha512(const u8 *ikm, size_t ikmlen,
const u8 *salt, size_t saltlen,
const u8 *info, size_t infolen,
u8 *output, size_t outlen)
static const u8 default_salt[SHA512_DIGEST_SIZE];
u8 prk[SHA512_DIGEST_SIZE]; /* pseudorandom key */
u8 *buf = xmalloc(1 + infolen + SHA512_DIGEST_SIZE);

/*
 * NOTE(review): lines are missing from this extract: the declarations of
 * 'p'/'counter'/'i', the condition selecting default_salt, the guard for
 * chaining T(i-1) only on later iterations, the counter append, and
 * free(buf).
 */
saltlen = sizeof(default_salt);

/* HKDF-Extract: PRK = HMAC-SHA512(salt, IKM) */
hmac_sha512(salt, saltlen, ikm, ikmlen, prk);

/* HKDF-Expand: T(i) = HMAC-SHA512(PRK, T(i-1) || info || counter) */
for (i = 0; i < outlen; i += SHA512_DIGEST_SIZE) {
u8 tmp[SHA512_DIGEST_SIZE];

ASSERT(counter != 0);
memcpy(p, &output[i - SHA512_DIGEST_SIZE],
p += SHA512_DIGEST_SIZE;
memcpy(p, info, infolen);
hmac_sha512(prk, sizeof(prk), buf, p - buf, tmp);
memcpy(&output[i], tmp, MIN(sizeof(tmp), outlen - i));
#ifdef ENABLE_ALG_TESTS
#include <openssl/evp.h>
#include <openssl/kdf.h>
/* Reference HKDF-SHA512 computation via OpenSSL's EVP_PKEY HKDF API */
static void openssl_hkdf_sha512(const u8 *ikm, size_t ikmlen,
const u8 *salt, size_t saltlen,
const u8 *info, size_t infolen,
u8 *output, size_t outlen)
EVP_PKEY_CTX *pctx = EVP_PKEY_CTX_new_id(EVP_PKEY_HKDF, NULL);
size_t actual_outlen = outlen;

ASSERT(pctx != NULL);
ASSERT(EVP_PKEY_derive_init(pctx) > 0);
ASSERT(EVP_PKEY_CTX_set_hkdf_md(pctx, EVP_sha512()) > 0);
ASSERT(EVP_PKEY_CTX_set1_hkdf_key(pctx, ikm, ikmlen) > 0);
ASSERT(EVP_PKEY_CTX_set1_hkdf_salt(pctx, salt, saltlen) > 0);
ASSERT(EVP_PKEY_CTX_add1_hkdf_info(pctx, info, infolen) > 0);
ASSERT(EVP_PKEY_derive(pctx, output, &actual_outlen) > 0);
ASSERT(actual_outlen == outlen);
EVP_PKEY_CTX_free(pctx);

/*
 * Compare this file's hkdf_sha512() against OpenSSL on random inputs.
 * NOTE(review): the declaration of 'info' and some braces are missing
 * from this extract.
 */
static void test_hkdf_sha512(void)
unsigned long num_tests = NUM_ALG_TEST_ITERATIONS;
while (num_tests--) {
u8 ikm[SHA512_DIGEST_SIZE];
u8 salt[SHA512_DIGEST_SIZE];
u8 actual_output[512];
u8 expected_output[sizeof(actual_output)];
size_t ikmlen = 1 + (rand() % sizeof(ikm));
size_t saltlen = rand() % (1 + sizeof(salt));
size_t infolen = rand() % (1 + sizeof(info));
* Don't test zero-length outputs, since OpenSSL 3.0 and later
* returns an error for those.
size_t outlen = 1 + (rand() % sizeof(actual_output));

rand_bytes(ikm, ikmlen);
rand_bytes(salt, saltlen);
rand_bytes(info, infolen);

hkdf_sha512(ikm, ikmlen, salt, saltlen, info, infolen,
actual_output, outlen);
openssl_hkdf_sha512(ikm, ikmlen, salt, saltlen, info, infolen,
expected_output, outlen);
ASSERT(memcmp(actual_output, expected_output, outlen) == 0);
#endif /* ENABLE_ALG_TESTS */
1019 /*----------------------------------------------------------------------------*
1021 *----------------------------------------------------------------------------*/
1024 * Reference: "AES-GCM-SIV: Nonce Misuse-Resistant Authenticated Encryption"
1025 * https://datatracker.ietf.org/doc/html/rfc8452
1028 #define POLYVAL_KEY_SIZE 16
1029 #define POLYVAL_BLOCK_SIZE 16
/*
 * POLYVAL universal hash (RFC 8452): fold @msglen bytes of @msg (a whole
 * number of 16-byte blocks) into @accumulator, keyed by @key.  The key is
 * premultiplied by x^{-128} so that the per-block multiply implements
 * POLYVAL's "dot" operation.
 *
 * NOTE(review): lines are missing from this extract: the declaration of
 * 'h', the first initializer of inv128 (presumably cpu_to_le64(1), per
 * the polynomial comment below), and closing braces.
 */
static void polyval_update(const u8 key[POLYVAL_KEY_SIZE],
const u8 *msg, size_t msglen,
u8 accumulator[POLYVAL_BLOCK_SIZE])
ble128 aligned_accumulator;
// x^{-128} = x^127 + x^124 + x^121 + x^114 + 1
static const ble128 inv128 = {
cpu_to_le64(0x9204ULL << 48)

/* Partial block support is not necessary for HCTR2 */
ASSERT(msglen % POLYVAL_BLOCK_SIZE == 0);

memcpy(&h, key, POLYVAL_BLOCK_SIZE);
memcpy(&aligned_accumulator, accumulator, POLYVAL_BLOCK_SIZE);
/* h = key * x^{-128} */
gf2_128_mul_polyval(&h, &inv128);

while (msglen > 0) {
xor((u8 *)&aligned_accumulator, (u8 *)&aligned_accumulator, msg,
POLYVAL_BLOCK_SIZE);
gf2_128_mul_polyval(&aligned_accumulator, &h);
msg += POLYVAL_BLOCK_SIZE;
msglen -= POLYVAL_BLOCK_SIZE;
memcpy(accumulator, &aligned_accumulator, POLYVAL_BLOCK_SIZE);
1060 /*----------------------------------------------------------------------------*
1061 * AES encryption modes *
1062 *----------------------------------------------------------------------------*/
/*
 * AES-256-XTS en/decryption of @nbytes (a multiple of the AES block size).
 * The 512-bit @key is split into the data-encryption key (first half) and
 * the tweak key (second half); the tweak for block i is E_tweakkey(iv)
 * multiplied by x^i in GF(2^128).
 */
static void aes_256_xts_crypt(const u8 key[2 * AES_256_KEY_SIZE],
const u8 iv[AES_BLOCK_SIZE], const u8 *src,
u8 *dst, size_t nbytes, bool decrypting)
struct aes_key tweak_key, cipher_key;

ASSERT(nbytes % AES_BLOCK_SIZE == 0);
aes_setkey(&cipher_key, key, AES_256_KEY_SIZE);
aes_setkey(&tweak_key, &key[AES_256_KEY_SIZE], AES_256_KEY_SIZE);
/* initial tweak: encrypt the IV with the tweak key */
aes_encrypt(&tweak_key, iv, (u8 *)&t);
for (i = 0; i < nbytes; i += AES_BLOCK_SIZE) {
xor(&dst[i], &src[i], (const u8 *)&t, AES_BLOCK_SIZE);
/*
 * NOTE(review): the 'if (decrypting) ... else ...' lines selecting
 * between these two calls, and the declarations of 't'/'i', are
 * missing from this extract.
 */
aes_decrypt(&cipher_key, &dst[i], &dst[i]);
aes_encrypt(&cipher_key, &dst[i], &dst[i]);
xor(&dst[i], &dst[i], (const u8 *)&t, AES_BLOCK_SIZE);
/* advance the tweak to the next block: t *= x */
gf2_128_mul_x_xts(&t);
1087 static void aes_256_xts_encrypt(const u8 key[2 * AES_256_KEY_SIZE],
1088 const u8 iv[AES_BLOCK_SIZE], const u8 *src,
1089 u8 *dst, size_t nbytes)
1091 aes_256_xts_crypt(key, iv, src, dst, nbytes, false);
1094 static void aes_256_xts_decrypt(const u8 key[2 * AES_256_KEY_SIZE],
1095 const u8 iv[AES_BLOCK_SIZE], const u8 *src,
1096 u8 *dst, size_t nbytes)
1098 aes_256_xts_crypt(key, iv, src, dst, nbytes, true);
#ifdef ENABLE_ALG_TESTS
#include <openssl/evp.h>
/*
 * Check this file's AES-256-XTS implementation against OpenSSL with random
 * keys, IVs, and block-aligned message lengths.
 * NOTE(review): declarations of 'res'/'outl', ASSERTs on EVP return
 * values, and some braces are missing from this extract.
 */
static void test_aes_256_xts(void)
unsigned long num_tests = NUM_ALG_TEST_ITERATIONS;
EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new();

ASSERT(ctx != NULL);
while (num_tests--) {
u8 key[2 * AES_256_KEY_SIZE];
u8 iv[AES_BLOCK_SIZE];
u8 ptext[32 * AES_BLOCK_SIZE];
u8 ctext[sizeof(ptext)];
u8 ref_ctext[sizeof(ptext)];
u8 decrypted[sizeof(ptext)];
* Don't test message lengths that aren't a multiple of the AES
* block size, since support for that is not implemented here.
* Also don't test zero-length messages, since OpenSSL 3.0 and
* later returns an error for those.
const size_t datalen = AES_BLOCK_SIZE *
(1 + rand() % (sizeof(ptext) / AES_BLOCK_SIZE));

rand_bytes(key, sizeof(key));
rand_bytes(iv, sizeof(iv));
rand_bytes(ptext, datalen);

aes_256_xts_encrypt(key, iv, ptext, ctext, datalen);
res = EVP_EncryptInit_ex(ctx, EVP_aes_256_xts(), NULL, key, iv);
res = EVP_EncryptUpdate(ctx, ref_ctext, &outl, ptext, datalen);
ASSERT(outl == datalen);
ASSERT(memcmp(ctext, ref_ctext, datalen) == 0);

aes_256_xts_decrypt(key, iv, ctext, decrypted, datalen);
ASSERT(memcmp(ptext, decrypted, datalen) == 0);
EVP_CIPHER_CTX_free(ctx);
#endif /* ENABLE_ALG_TESTS */
/*
 * CBC-mode encryption with an already-expanded key.
 * NOTE(review): the declaration of 'i', the AES_BLOCK_SIZE count argument
 * continuation of the xor() call, and closing braces are missing from
 * this extract.
 */
static void aes_cbc_encrypt(const struct aes_key *k,
const u8 iv[AES_BLOCK_SIZE],
const u8 *src, u8 *dst, size_t nbytes)
ASSERT(nbytes % AES_BLOCK_SIZE == 0);
for (i = 0; i < nbytes; i += AES_BLOCK_SIZE) {
/* chain: XOR with IV for the first block, else the previous ctext */
xor(&dst[i], &src[i], (i == 0 ? iv : &dst[i - AES_BLOCK_SIZE]),
aes_encrypt(k, &dst[i], &dst[i]);

/*
 * CBC-mode decryption, processing blocks from last to first so that
 * src == dst works.
 * NOTE(review): the 'size_t i = nbytes;' declaration, the 'while (i)'
 * loop header, the xor() count argument, and braces are missing.
 */
static void aes_cbc_decrypt(const struct aes_key *k,
const u8 iv[AES_BLOCK_SIZE],
const u8 *src, u8 *dst, size_t nbytes)
ASSERT(i % AES_BLOCK_SIZE == 0);
i -= AES_BLOCK_SIZE;
aes_decrypt(k, &src[i], &dst[i]);
xor(&dst[i], &dst[i], (i == 0 ? iv : &src[i - AES_BLOCK_SIZE]),

/*
 * CBC with ciphertext stealing (the CBC-CS3 / Kerberos variant used by
 * fscrypt): the last two blocks are swapped and the final partial block
 * is handled without padding.  @nbytes must be >= one AES block.
 * NOTE(review): local declarations ('k', 'pad') and some lines are
 * missing from this extract.
 */
static void aes_cts_cbc_encrypt(const u8 *key, int keysize,
const u8 iv[AES_BLOCK_SIZE],
const u8 *src, u8 *dst, size_t nbytes)
const size_t offset = ROUND_DOWN(nbytes - 1, AES_BLOCK_SIZE);
const size_t final_bsize = nbytes - offset;
u8 buf[AES_BLOCK_SIZE];

ASSERT(nbytes >= AES_BLOCK_SIZE);

aes_setkey(&k, key, keysize);

/* a single block degenerates to plain CBC */
if (nbytes == AES_BLOCK_SIZE)
return aes_cbc_encrypt(&k, iv, src, dst, nbytes);

aes_cbc_encrypt(&k, iv, src, dst, offset);
pad = &dst[offset - AES_BLOCK_SIZE];
memcpy(buf, pad, AES_BLOCK_SIZE);
xor(buf, buf, &src[offset], final_bsize);
/* steal: the truncated next-to-last ciphertext becomes the final block */
memcpy(&dst[offset], pad, final_bsize);
aes_encrypt(&k, buf, pad);

/* Inverse of aes_cts_cbc_encrypt() */
static void aes_cts_cbc_decrypt(const u8 *key, int keysize,
const u8 iv[AES_BLOCK_SIZE],
const u8 *src, u8 *dst, size_t nbytes)
const size_t offset = ROUND_DOWN(nbytes - 1, AES_BLOCK_SIZE);
const size_t final_bsize = nbytes - offset;

ASSERT(nbytes >= AES_BLOCK_SIZE);

aes_setkey(&k, key, keysize);

if (nbytes == AES_BLOCK_SIZE)
return aes_cbc_decrypt(&k, iv, src, dst, nbytes);

pad = &dst[offset - AES_BLOCK_SIZE];
aes_decrypt(&k, &src[offset - AES_BLOCK_SIZE], pad);
xor(&dst[offset], &src[offset], pad, final_bsize);
xor(pad, pad, &dst[offset], final_bsize);

aes_cbc_decrypt(&k, (offset == AES_BLOCK_SIZE ?
iv : &src[offset - 2 * AES_BLOCK_SIZE]),
pad, pad, AES_BLOCK_SIZE);
aes_cbc_decrypt(&k, iv, src, dst, offset - AES_BLOCK_SIZE);
1227 static void aes_256_cts_cbc_encrypt(const u8 key[AES_256_KEY_SIZE],
1228 const u8 iv[AES_BLOCK_SIZE],
1229 const u8 *src, u8 *dst, size_t nbytes)
1231 aes_cts_cbc_encrypt(key, AES_256_KEY_SIZE, iv, src, dst, nbytes);
1234 static void aes_256_cts_cbc_decrypt(const u8 key[AES_256_KEY_SIZE],
1235 const u8 iv[AES_BLOCK_SIZE],
1236 const u8 *src, u8 *dst, size_t nbytes)
1238 aes_cts_cbc_decrypt(key, AES_256_KEY_SIZE, iv, src, dst, nbytes);
1241 #ifdef ENABLE_ALG_TESTS
1242 #include <openssl/modes.h>
/* Adapter matching OpenSSL's block128_f signature for CRYPTO_cts128_*(). */
static void aes_block128_f(const unsigned char in[16],
			   unsigned char out[16], const void *key)
{
	aes_encrypt(key, in, out);
}
1249 static void test_aes_256_cts_cbc(void)
1251 unsigned long num_tests = NUM_ALG_TEST_ITERATIONS;
1253 while (num_tests--) {
1254 u8 key[AES_256_KEY_SIZE];
1255 u8 iv[AES_BLOCK_SIZE];
1256 u8 iv_copy[AES_BLOCK_SIZE];
1258 u8 ctext[sizeof(ptext)];
1259 u8 ref_ctext[sizeof(ptext)];
1260 u8 decrypted[sizeof(ptext)];
1261 const size_t datalen = 16 + (rand() % (sizeof(ptext) - 15));
1264 rand_bytes(key, sizeof(key));
1265 rand_bytes(iv, sizeof(iv));
1266 rand_bytes(ptext, datalen);
1268 aes_256_cts_cbc_encrypt(key, iv, ptext, ctext, datalen);
1270 /* OpenSSL doesn't allow datalen=AES_BLOCK_SIZE; Linux does */
1271 if (datalen != AES_BLOCK_SIZE) {
1272 aes_setkey(&k, key, sizeof(key));
1273 memcpy(iv_copy, iv, sizeof(iv));
1274 ASSERT(CRYPTO_cts128_encrypt_block(ptext, ref_ctext,
1275 datalen, &k, iv_copy,
1278 ASSERT(memcmp(ctext, ref_ctext, datalen) == 0);
1280 aes_256_cts_cbc_decrypt(key, iv, ctext, decrypted, datalen);
1281 ASSERT(memcmp(ptext, decrypted, datalen) == 0);
1284 #endif /* ENABLE_ALG_TESTS */
1286 static void essiv_generate_iv(const u8 orig_key[AES_128_KEY_SIZE],
1287 const u8 orig_iv[AES_BLOCK_SIZE],
1288 u8 real_iv[AES_BLOCK_SIZE])
1290 u8 essiv_key[SHA256_DIGEST_SIZE];
1291 struct aes_key essiv;
1293 /* AES encrypt the original IV using a hash of the original key */
1294 STATIC_ASSERT(SHA256_DIGEST_SIZE == AES_256_KEY_SIZE);
1295 sha256(orig_key, AES_128_KEY_SIZE, essiv_key);
1296 aes_setkey(&essiv, essiv_key, AES_256_KEY_SIZE);
1297 aes_encrypt(&essiv, orig_iv, real_iv);
1300 static void aes_128_cbc_essiv_encrypt(const u8 key[AES_128_KEY_SIZE],
1301 const u8 iv[AES_BLOCK_SIZE],
1302 const u8 *src, u8 *dst, size_t nbytes)
1305 u8 real_iv[AES_BLOCK_SIZE];
1307 aes_setkey(&k, key, AES_128_KEY_SIZE);
1308 essiv_generate_iv(key, iv, real_iv);
1309 aes_cbc_encrypt(&k, real_iv, src, dst, nbytes);
1312 static void aes_128_cbc_essiv_decrypt(const u8 key[AES_128_KEY_SIZE],
1313 const u8 iv[AES_BLOCK_SIZE],
1314 const u8 *src, u8 *dst, size_t nbytes)
1317 u8 real_iv[AES_BLOCK_SIZE];
1319 aes_setkey(&k, key, AES_128_KEY_SIZE);
1320 essiv_generate_iv(key, iv, real_iv);
1321 aes_cbc_decrypt(&k, real_iv, src, dst, nbytes);
1324 static void aes_128_cts_cbc_encrypt(const u8 key[AES_128_KEY_SIZE],
1325 const u8 iv[AES_BLOCK_SIZE],
1326 const u8 *src, u8 *dst, size_t nbytes)
1328 aes_cts_cbc_encrypt(key, AES_128_KEY_SIZE, iv, src, dst, nbytes);
1331 static void aes_128_cts_cbc_decrypt(const u8 key[AES_128_KEY_SIZE],
1332 const u8 iv[AES_BLOCK_SIZE],
1333 const u8 *src, u8 *dst, size_t nbytes)
1335 aes_cts_cbc_decrypt(key, AES_128_KEY_SIZE, iv, src, dst, nbytes);
1339 * Reference: "Length-preserving encryption with HCTR2"
1340 * https://ia.cr/2021/1441
1343 static void aes_256_xctr_crypt(const u8 key[AES_256_KEY_SIZE],
1344 const u8 iv[AES_BLOCK_SIZE], const u8 *src,
1345 u8 *dst, size_t nbytes)
1349 u8 bytes[AES_BLOCK_SIZE];
1354 aes_setkey(&k, key, AES_256_KEY_SIZE);
1356 for (i = 0; i < nbytes; i += AES_BLOCK_SIZE) {
1357 memcpy(blk.bytes, iv, AES_BLOCK_SIZE);
1358 blk.ctr ^= cpu_to_le64((i / AES_BLOCK_SIZE) + 1);
1359 aes_encrypt(&k, blk.bytes, blk.bytes);
1360 xor(&dst[i], blk.bytes, &src[i], MIN(AES_BLOCK_SIZE, nbytes - i));
1365 * Reference: "Length-preserving encryption with HCTR2"
1366 * https://ia.cr/2021/1441
1369 #define HCTR2_IV_SIZE 32
1370 static void hctr2_hash_iv(const u8 hbar[POLYVAL_KEY_SIZE],
1371 const u8 iv[HCTR2_IV_SIZE], size_t msglen,
1372 u8 digest[POLYVAL_BLOCK_SIZE])
1374 le128 tweaklen_blk = {
1375 .lo = cpu_to_le64(HCTR2_IV_SIZE * 8 * 2 + 2 +
1376 (msglen % AES_BLOCK_SIZE != 0))
1379 memset(digest, 0, POLYVAL_BLOCK_SIZE);
1380 polyval_update(hbar, (u8 *)&tweaklen_blk, POLYVAL_BLOCK_SIZE, digest);
1381 polyval_update(hbar, iv, HCTR2_IV_SIZE, digest);
1384 static void hctr2_hash_message(const u8 hbar[POLYVAL_KEY_SIZE],
1385 const u8 *msg, size_t msglen,
1386 u8 digest[POLYVAL_BLOCK_SIZE])
1388 size_t remainder = msglen % AES_BLOCK_SIZE;
1389 u8 padded_block[POLYVAL_BLOCK_SIZE] = {0};
1391 polyval_update(hbar, msg, msglen - remainder, digest);
1393 memcpy(padded_block, &msg[msglen - remainder], remainder);
1394 padded_block[remainder] = 1;
1395 polyval_update(hbar, padded_block, POLYVAL_BLOCK_SIZE, digest);
1399 static void aes_256_hctr2_crypt(const u8 key[AES_256_KEY_SIZE],
1400 const u8 iv[HCTR2_IV_SIZE], const u8 *src,
1401 u8 *dst, size_t nbytes, bool decrypting)
1404 u8 hbar[AES_BLOCK_SIZE] = {0};
1405 u8 L[AES_BLOCK_SIZE] = {1};
1406 size_t bulk_bytes = nbytes - AES_BLOCK_SIZE;
1407 u8 digest[POLYVAL_BLOCK_SIZE];
1409 const u8 *N = src + AES_BLOCK_SIZE;
1410 u8 MM[AES_BLOCK_SIZE];
1411 u8 UU[AES_BLOCK_SIZE];
1412 u8 S[AES_BLOCK_SIZE];
1414 u8 *V = dst + AES_BLOCK_SIZE;
1416 ASSERT(nbytes >= AES_BLOCK_SIZE);
1417 aes_setkey(&k, key, AES_256_KEY_SIZE);
1419 aes_encrypt(&k, hbar, hbar);
1420 aes_encrypt(&k, L, L);
1422 hctr2_hash_iv(hbar, iv, bulk_bytes, digest);
1423 hctr2_hash_message(hbar, N, bulk_bytes, digest);
1425 xor(MM, M, digest, AES_BLOCK_SIZE);
1428 aes_decrypt(&k, MM, UU);
1430 aes_encrypt(&k, MM, UU);
1432 xor(S, MM, UU, AES_BLOCK_SIZE);
1433 xor(S, L, S, AES_BLOCK_SIZE);
1435 aes_256_xctr_crypt(key, S, N, V, bulk_bytes);
1437 hctr2_hash_iv(hbar, iv, bulk_bytes, digest);
1438 hctr2_hash_message(hbar, V, bulk_bytes, digest);
1440 xor(U, UU, digest, AES_BLOCK_SIZE);
1443 static void aes_256_hctr2_encrypt(const u8 key[AES_256_KEY_SIZE],
1444 const u8 iv[HCTR2_IV_SIZE], const u8 *src,
1445 u8 *dst, size_t nbytes)
1447 aes_256_hctr2_crypt(key, iv, src, dst, nbytes, false);
1450 static void aes_256_hctr2_decrypt(const u8 key[AES_256_KEY_SIZE],
1451 const u8 iv[HCTR2_IV_SIZE], const u8 *src,
1452 u8 *dst, size_t nbytes)
1454 aes_256_hctr2_crypt(key, iv, src, dst, nbytes, true);
1457 #ifdef ENABLE_ALG_TESTS
1458 #include <linux/if_alg.h>
1459 #include <sys/socket.h>
1460 static void test_aes_256_hctr2(void)
1462 int algfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
1463 struct sockaddr_alg addr = {
1464 .salg_type = "skcipher",
1465 .salg_name = "hctr2(aes)",
1467 unsigned long num_tests = NUM_ALG_TEST_ITERATIONS;
1470 die_errno("can't create AF_ALG socket");
1471 if (bind(algfd, (struct sockaddr *)&addr, sizeof(addr)) != 0)
1472 die_errno("can't bind AF_ALG socket to HCTR2 algorithm");
1474 while (num_tests--) {
1475 u8 key[AES_256_KEY_SIZE];
1476 u8 iv[HCTR2_IV_SIZE];
1478 u8 ctext[sizeof(ptext)];
1479 u8 ref_ctext[sizeof(ptext)];
1480 u8 decrypted[sizeof(ptext)];
1481 const size_t datalen = 16 + (rand() % (sizeof(ptext) - 15));
1483 rand_bytes(key, sizeof(key));
1484 rand_bytes(iv, sizeof(iv));
1485 rand_bytes(ptext, datalen);
1487 aes_256_hctr2_encrypt(key, iv, ptext, ctext, datalen);
1488 af_alg_crypt(algfd, ALG_OP_ENCRYPT, key, sizeof(key),
1489 iv, sizeof(iv), ptext, ref_ctext, datalen);
1490 ASSERT(memcmp(ctext, ref_ctext, datalen) == 0);
1492 aes_256_hctr2_decrypt(key, iv, ctext, decrypted, datalen);
1493 ASSERT(memcmp(ptext, decrypted, datalen) == 0);
1497 #endif /* ENABLE_ALG_TESTS */
1499 /*----------------------------------------------------------------------------*
1500 * XChaCha12 stream cipher *
1501 *----------------------------------------------------------------------------*/
1505 * - "XChaCha: eXtended-nonce ChaCha and AEAD_XChaCha20_Poly1305"
1506 * https://tools.ietf.org/html/draft-arciszewski-xchacha-03
1508 * - "ChaCha, a variant of Salsa20"
1509 * https://cr.yp.to/chacha/chacha-20080128.pdf
1511 * - "Extending the Salsa20 nonce"
1512 * https://cr.yp.to/snuffle/xsalsa-20081128.pdf
1515 #define CHACHA_KEY_SIZE 32
1516 #define XCHACHA_KEY_SIZE CHACHA_KEY_SIZE
1517 #define XCHACHA_NONCE_SIZE 24
1519 static void chacha_init_state(u32 state[16], const u8 key[CHACHA_KEY_SIZE],
1522 static const u8 consts[16] = "expand 32-byte k";
1525 for (i = 0; i < 4; i++)
1526 state[i] = get_unaligned_le32(&consts[i * sizeof(__le32)]);
1527 for (i = 0; i < 8; i++)
1528 state[4 + i] = get_unaligned_le32(&key[i * sizeof(__le32)]);
1529 for (i = 0; i < 4; i++)
1530 state[12 + i] = get_unaligned_le32(&iv[i * sizeof(__le32)]);
/* One ChaCha quarter-round on four state words. */
#define CHACHA_QUARTERROUND(a, b, c, d)		\
	do {					\
		a += b; d = rol32(d ^ a, 16);	\
		c += d; b = rol32(b ^ c, 12);	\
		a += b; d = rol32(d ^ a, 8);	\
		c += d; b = rol32(b ^ c, 7);	\
	} while (0)
1541 static void chacha_permute(u32 x[16], int nrounds)
1545 CHACHA_QUARTERROUND(x[0], x[4], x[8], x[12]);
1546 CHACHA_QUARTERROUND(x[1], x[5], x[9], x[13]);
1547 CHACHA_QUARTERROUND(x[2], x[6], x[10], x[14]);
1548 CHACHA_QUARTERROUND(x[3], x[7], x[11], x[15]);
1550 /* diagonal round */
1551 CHACHA_QUARTERROUND(x[0], x[5], x[10], x[15]);
1552 CHACHA_QUARTERROUND(x[1], x[6], x[11], x[12]);
1553 CHACHA_QUARTERROUND(x[2], x[7], x[8], x[13]);
1554 CHACHA_QUARTERROUND(x[3], x[4], x[9], x[14]);
1555 } while ((nrounds -= 2) != 0);
1558 static void xchacha(const u8 key[XCHACHA_KEY_SIZE],
1559 const u8 nonce[XCHACHA_NONCE_SIZE],
1560 const u8 *src, u8 *dst, size_t nbytes, int nrounds)
1563 u8 real_key[CHACHA_KEY_SIZE];
1564 u8 real_iv[16] = { 0 };
1567 /* Compute real key using original key and first 128 nonce bits */
1568 chacha_init_state(state, key, nonce);
1569 chacha_permute(state, nrounds);
1570 for (i = 0; i < 8; i++) /* state words 0..3, 12..15 */
1571 put_unaligned_le32(state[(i < 4 ? 0 : 8) + i],
1572 &real_key[i * sizeof(__le32)]);
1574 /* Now do regular ChaCha, using real key and remaining nonce bits */
1575 memcpy(&real_iv[8], nonce + 16, 8);
1576 chacha_init_state(state, real_key, real_iv);
1577 for (i = 0; i < nbytes; i += 64) {
1579 __le32 keystream[16];
1581 memcpy(x, state, 64);
1582 chacha_permute(x, nrounds);
1583 for (j = 0; j < 16; j++)
1584 keystream[j] = cpu_to_le32(x[j] + state[j]);
1585 xor(&dst[i], &src[i], (u8 *)keystream, MIN(nbytes - i, 64));
1586 if (++state[12] == 0)
1591 static void xchacha12(const u8 key[XCHACHA_KEY_SIZE],
1592 const u8 nonce[XCHACHA_NONCE_SIZE],
1593 const u8 *src, u8 *dst, size_t nbytes)
1595 xchacha(key, nonce, src, dst, nbytes, 12);
1598 /*----------------------------------------------------------------------------*
1600 *----------------------------------------------------------------------------*/
1603 * Note: this is only the Poly1305 ε-almost-∆-universal hash function, not the
1604 * full Poly1305 MAC. I.e., it doesn't add anything at the end.
1607 #define POLY1305_KEY_SIZE 16
1608 #define POLY1305_BLOCK_SIZE 16
1610 static void poly1305(const u8 key[POLY1305_KEY_SIZE],
1611 const u8 *msg, size_t msglen, le128 *out)
1613 const u32 limb_mask = 0x3ffffff; /* limbs are base 2^26 */
1614 const u64 r0 = (get_unaligned_le32(key + 0) >> 0) & 0x3ffffff;
1615 const u64 r1 = (get_unaligned_le32(key + 3) >> 2) & 0x3ffff03;
1616 const u64 r2 = (get_unaligned_le32(key + 6) >> 4) & 0x3ffc0ff;
1617 const u64 r3 = (get_unaligned_le32(key + 9) >> 6) & 0x3f03fff;
1618 const u64 r4 = (get_unaligned_le32(key + 12) >> 8) & 0x00fffff;
1619 u32 h0 = 0, h1 = 0, h2 = 0, h3 = 0, h4 = 0;
1620 u32 g0, g1, g2, g3, g4, ge_p_mask;
1622 /* Partial block support is not necessary for Adiantum */
1623 ASSERT(msglen % POLY1305_BLOCK_SIZE == 0);
1626 u64 d0, d1, d2, d3, d4;
1629 h0 += (get_unaligned_le32(msg + 0) >> 0) & limb_mask;
1630 h1 += (get_unaligned_le32(msg + 3) >> 2) & limb_mask;
1631 h2 += (get_unaligned_le32(msg + 6) >> 4) & limb_mask;
1632 h3 += (get_unaligned_le32(msg + 9) >> 6) & limb_mask;
1633 h4 += (get_unaligned_le32(msg + 12) >> 8) | (1 << 24);
1636 d0 = h0*r0 + h1*5*r4 + h2*5*r3 + h3*5*r2 + h4*5*r1;
1637 d1 = h0*r1 + h1*r0 + h2*5*r4 + h3*5*r3 + h4*5*r2;
1638 d2 = h0*r2 + h1*r1 + h2*r0 + h3*5*r4 + h4*5*r3;
1639 d3 = h0*r3 + h1*r2 + h2*r1 + h3*r0 + h4*5*r4;
1640 d4 = h0*r4 + h1*r3 + h2*r2 + h3*r1 + h4*r0;
1642 /* (partial) h %= 2^130 - 5 */
1643 d1 += d0 >> 26; h0 = d0 & limb_mask;
1644 d2 += d1 >> 26; h1 = d1 & limb_mask;
1645 d3 += d2 >> 26; h2 = d2 & limb_mask;
1646 d4 += d3 >> 26; h3 = d3 & limb_mask;
1647 h0 += (d4 >> 26) * 5; h4 = d4 & limb_mask;
1648 h1 += h0 >> 26; h0 &= limb_mask;
1650 msg += POLY1305_BLOCK_SIZE;
1651 msglen -= POLY1305_BLOCK_SIZE;
1655 h2 += (h1 >> 26); h1 &= limb_mask;
1656 h3 += (h2 >> 26); h2 &= limb_mask;
1657 h4 += (h3 >> 26); h3 &= limb_mask;
1658 h0 += (h4 >> 26) * 5; h4 &= limb_mask;
1659 h1 += (h0 >> 26); h0 &= limb_mask;
1661 /* if (h >= 2^130 - 5) h -= 2^130 - 5; */
1663 g1 = h1 + (g0 >> 26); g0 &= limb_mask;
1664 g2 = h2 + (g1 >> 26); g1 &= limb_mask;
1665 g3 = h3 + (g2 >> 26); g2 &= limb_mask;
1666 g4 = h4 + (g3 >> 26); g3 &= limb_mask;
1667 ge_p_mask = ~((g4 >> 26) - 1); /* all 1's if h >= 2^130 - 5, else 0 */
1668 h0 = (h0 & ~ge_p_mask) | (g0 & ge_p_mask);
1669 h1 = (h1 & ~ge_p_mask) | (g1 & ge_p_mask);
1670 h2 = (h2 & ~ge_p_mask) | (g2 & ge_p_mask);
1671 h3 = (h3 & ~ge_p_mask) | (g3 & ge_p_mask);
1672 h4 = (h4 & ~ge_p_mask) | (g4 & ge_p_mask & limb_mask);
1675 out->lo = cpu_to_le64(((u64)h2 << 52) | ((u64)h1 << 26) | h0);
1676 out->hi = cpu_to_le64(((u64)h4 << 40) | ((u64)h3 << 14) | (h2 >> 12));
1679 /*----------------------------------------------------------------------------*
1680 * Adiantum encryption mode *
1681 *----------------------------------------------------------------------------*/
1684 * Reference: "Adiantum: length-preserving encryption for entry-level processors"
1685 * https://tosc.iacr.org/index.php/ToSC/article/view/7360
1688 #define ADIANTUM_KEY_SIZE 32
1689 #define ADIANTUM_IV_SIZE 32
1690 #define ADIANTUM_HASH_KEY_SIZE ((2 * POLY1305_KEY_SIZE) + NH_KEY_SIZE)
1692 #define NH_KEY_SIZE 1072
1693 #define NH_KEY_WORDS (NH_KEY_SIZE / sizeof(u32))
1694 #define NH_BLOCK_SIZE 1024
1695 #define NH_HASH_SIZE 32
1696 #define NH_MESSAGE_UNIT 16
1698 static u64 nh_pass(const u32 *key, const u8 *msg, size_t msglen)
1702 ASSERT(msglen % NH_MESSAGE_UNIT == 0);
1704 sum += (u64)(u32)(get_unaligned_le32(msg + 0) + key[0]) *
1705 (u32)(get_unaligned_le32(msg + 8) + key[2]);
1706 sum += (u64)(u32)(get_unaligned_le32(msg + 4) + key[1]) *
1707 (u32)(get_unaligned_le32(msg + 12) + key[3]);
1708 key += NH_MESSAGE_UNIT / sizeof(key[0]);
1709 msg += NH_MESSAGE_UNIT;
1710 msglen -= NH_MESSAGE_UNIT;
1715 /* NH ε-almost-universal hash function */
1716 static void nh(const u32 *key, const u8 *msg, size_t msglen,
1717 u8 result[NH_HASH_SIZE])
1721 for (i = 0; i < NH_HASH_SIZE; i += sizeof(__le64)) {
1722 put_unaligned_le64(nh_pass(key, msg, msglen), &result[i]);
1723 key += NH_MESSAGE_UNIT / sizeof(key[0]);
1727 /* Adiantum's ε-almost-∆-universal hash function */
1728 static void adiantum_hash(const u8 key[ADIANTUM_HASH_KEY_SIZE],
1729 const u8 iv[ADIANTUM_IV_SIZE],
1730 const u8 *msg, size_t msglen, le128 *result)
1732 const u8 *header_poly_key = key;
1733 const u8 *msg_poly_key = header_poly_key + POLY1305_KEY_SIZE;
1734 const u8 *nh_key = msg_poly_key + POLY1305_KEY_SIZE;
1735 u32 nh_key_words[NH_KEY_WORDS];
1736 u8 header[POLY1305_BLOCK_SIZE + ADIANTUM_IV_SIZE];
1737 const size_t num_nh_blocks = DIV_ROUND_UP(msglen, NH_BLOCK_SIZE);
1738 u8 *nh_hashes = xmalloc(num_nh_blocks * NH_HASH_SIZE);
1739 const size_t padded_msglen = ROUND_UP(msglen, NH_MESSAGE_UNIT);
1740 u8 *padded_msg = xmalloc(padded_msglen);
1744 for (i = 0; i < NH_KEY_WORDS; i++)
1745 nh_key_words[i] = get_unaligned_le32(&nh_key[i * sizeof(u32)]);
1747 /* Hash tweak and message length with first Poly1305 key */
1748 put_unaligned_le64((u64)msglen * 8, header);
1749 put_unaligned_le64(0, &header[sizeof(__le64)]);
1750 memcpy(&header[POLY1305_BLOCK_SIZE], iv, ADIANTUM_IV_SIZE);
1751 poly1305(header_poly_key, header, sizeof(header), &hash1);
1753 /* Hash NH hashes of message blocks using second Poly1305 key */
1754 /* (using a super naive way of handling the padding) */
1755 memcpy(padded_msg, msg, msglen);
1756 memset(&padded_msg[msglen], 0, padded_msglen - msglen);
1757 for (i = 0; i < num_nh_blocks; i++) {
1758 nh(nh_key_words, &padded_msg[i * NH_BLOCK_SIZE],
1759 MIN(NH_BLOCK_SIZE, padded_msglen - (i * NH_BLOCK_SIZE)),
1760 &nh_hashes[i * NH_HASH_SIZE]);
1762 poly1305(msg_poly_key, nh_hashes, num_nh_blocks * NH_HASH_SIZE, &hash2);
1764 /* Add the two hashes together to get the final hash */
1765 le128_add(result, &hash1, &hash2);
1771 static void adiantum_crypt(const u8 key[ADIANTUM_KEY_SIZE],
1772 const u8 iv[ADIANTUM_IV_SIZE], const u8 *src,
1773 u8 *dst, size_t nbytes, bool decrypting)
1775 u8 subkeys[AES_256_KEY_SIZE + ADIANTUM_HASH_KEY_SIZE] = { 0 };
1776 struct aes_key aes_key;
1778 u8 nonce[XCHACHA_NONCE_SIZE];
1780 } u = { .nonce = { 1 } };
1781 const size_t bulk_len = nbytes - sizeof(u.block);
1784 ASSERT(nbytes >= sizeof(u.block));
1786 /* Derive subkeys */
1787 xchacha12(key, u.nonce, subkeys, subkeys, sizeof(subkeys));
1788 aes_setkey(&aes_key, subkeys, AES_256_KEY_SIZE);
1790 /* Hash left part and add to right part */
1791 adiantum_hash(&subkeys[AES_256_KEY_SIZE], iv, src, bulk_len, &hash);
1792 memcpy(&u.block, &src[bulk_len], sizeof(u.block));
1793 le128_add(&u.block, &u.block, &hash);
1795 if (!decrypting) /* Encrypt right part with block cipher */
1796 aes_encrypt(&aes_key, u.nonce, u.nonce);
1798 /* Encrypt left part with stream cipher, using the computed nonce */
1799 u.nonce[sizeof(u.block)] = 1;
1800 xchacha12(key, u.nonce, src, dst, bulk_len);
1802 if (decrypting) /* Decrypt right part with block cipher */
1803 aes_decrypt(&aes_key, u.nonce, u.nonce);
1805 /* Finalize right part by subtracting hash of left part */
1806 adiantum_hash(&subkeys[AES_256_KEY_SIZE], iv, dst, bulk_len, &hash);
1807 le128_sub(&u.block, &u.block, &hash);
1808 memcpy(&dst[bulk_len], &u.block, sizeof(u.block));
1811 static void adiantum_encrypt(const u8 key[ADIANTUM_KEY_SIZE],
1812 const u8 iv[ADIANTUM_IV_SIZE],
1813 const u8 *src, u8 *dst, size_t nbytes)
1815 adiantum_crypt(key, iv, src, dst, nbytes, false);
1818 static void adiantum_decrypt(const u8 key[ADIANTUM_KEY_SIZE],
1819 const u8 iv[ADIANTUM_IV_SIZE],
1820 const u8 *src, u8 *dst, size_t nbytes)
1822 adiantum_crypt(key, iv, src, dst, nbytes, true);
1825 #ifdef ENABLE_ALG_TESTS
1826 static void test_adiantum(void)
1828 int algfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
1829 struct sockaddr_alg addr = {
1830 .salg_type = "skcipher",
1831 .salg_name = "adiantum(xchacha12,aes)",
1833 unsigned long num_tests = NUM_ALG_TEST_ITERATIONS;
1836 die_errno("can't create AF_ALG socket");
1837 if (bind(algfd, (struct sockaddr *)&addr, sizeof(addr)) != 0)
1838 die_errno("can't bind AF_ALG socket to Adiantum algorithm");
1840 while (num_tests--) {
1841 u8 key[ADIANTUM_KEY_SIZE];
1842 u8 iv[ADIANTUM_IV_SIZE];
1844 u8 ctext[sizeof(ptext)];
1845 u8 ref_ctext[sizeof(ptext)];
1846 u8 decrypted[sizeof(ptext)];
1847 const size_t datalen = 16 + (rand() % (sizeof(ptext) - 15));
1849 rand_bytes(key, sizeof(key));
1850 rand_bytes(iv, sizeof(iv));
1851 rand_bytes(ptext, datalen);
1853 adiantum_encrypt(key, iv, ptext, ctext, datalen);
1854 af_alg_crypt(algfd, ALG_OP_ENCRYPT, key, sizeof(key),
1855 iv, sizeof(iv), ptext, ref_ctext, datalen);
1856 ASSERT(memcmp(ctext, ref_ctext, datalen) == 0);
1858 adiantum_decrypt(key, iv, ctext, decrypted, datalen);
1859 ASSERT(memcmp(ptext, decrypted, datalen) == 0);
1863 #endif /* ENABLE_ALG_TESTS */
1865 /*----------------------------------------------------------------------------*
1867 *----------------------------------------------------------------------------*/
1870 * Reference: "SipHash: a fast short-input PRF"
1871 * https://cr.yp.to/siphash/siphash-20120918.pdf
/* One SipHash round over the four state words v0..v3. */
#define SIPROUND					\
	do {						\
		v0 += v1; v2 += v3;			\
		v1 = rol64(v1, 13); v3 = rol64(v3, 16);	\
		v1 ^= v0; v3 ^= v2;			\
		v0 = rol64(v0, 32);			\
		v2 += v1; v0 += v3;			\
		v1 = rol64(v1, 17); v3 = rol64(v3, 21);	\
		v1 ^= v2; v3 ^= v0;			\
		v2 = rol64(v2, 32);			\
	} while (0)
1886 /* Compute the SipHash-2-4 of a 64-bit number when formatted as little endian */
1887 static u64 siphash_1u64(const u64 key[2], u64 data)
1889 u64 v0 = key[0] ^ 0x736f6d6570736575ULL;
1890 u64 v1 = key[1] ^ 0x646f72616e646f6dULL;
1891 u64 v2 = key[0] ^ 0x6c7967656e657261ULL;
1892 u64 v3 = key[1] ^ 0x7465646279746573ULL;
1893 u64 m[2] = {data, (u64)sizeof(data) << 56};
1896 for (i = 0; i < ARRAY_SIZE(m); i++) {
1904 for (i = 0; i < 4; i++)
1906 return v0 ^ v1 ^ v2 ^ v3;
1909 /*----------------------------------------------------------------------------*
1911 *----------------------------------------------------------------------------*/
1913 #define FILE_NONCE_SIZE 16
1914 #define UUID_SIZE 16
1915 #define MAX_KEY_SIZE 64
1916 #define MAX_IV_SIZE ADIANTUM_IV_SIZE
1918 static const struct fscrypt_cipher {
1920 void (*encrypt)(const u8 *key, const u8 *iv, const u8 *src,
1921 u8 *dst, size_t nbytes);
1922 void (*decrypt)(const u8 *key, const u8 *iv, const u8 *src,
1923 u8 *dst, size_t nbytes);
1926 } fscrypt_ciphers[] = {
1928 .name = "AES-256-XTS",
1929 .encrypt = aes_256_xts_encrypt,
1930 .decrypt = aes_256_xts_decrypt,
1931 .keysize = 2 * AES_256_KEY_SIZE,
1933 .name = "AES-256-CTS-CBC",
1934 .encrypt = aes_256_cts_cbc_encrypt,
1935 .decrypt = aes_256_cts_cbc_decrypt,
1936 .keysize = AES_256_KEY_SIZE,
1937 .min_input_size = AES_BLOCK_SIZE,
1939 .name = "AES-128-CBC-ESSIV",
1940 .encrypt = aes_128_cbc_essiv_encrypt,
1941 .decrypt = aes_128_cbc_essiv_decrypt,
1942 .keysize = AES_128_KEY_SIZE,
1944 .name = "AES-128-CTS-CBC",
1945 .encrypt = aes_128_cts_cbc_encrypt,
1946 .decrypt = aes_128_cts_cbc_decrypt,
1947 .keysize = AES_128_KEY_SIZE,
1948 .min_input_size = AES_BLOCK_SIZE,
1950 .name = "AES-256-HCTR2",
1951 .encrypt = aes_256_hctr2_encrypt,
1952 .decrypt = aes_256_hctr2_decrypt,
1953 .keysize = AES_256_KEY_SIZE,
1954 .min_input_size = AES_BLOCK_SIZE,
1957 .encrypt = adiantum_encrypt,
1958 .decrypt = adiantum_decrypt,
1959 .keysize = ADIANTUM_KEY_SIZE,
1960 .min_input_size = AES_BLOCK_SIZE,
1964 static const struct fscrypt_cipher *find_fscrypt_cipher(const char *name)
1968 for (i = 0; i < ARRAY_SIZE(fscrypt_ciphers); i++) {
1969 if (strcmp(fscrypt_ciphers[i].name, name) == 0)
1970 return &fscrypt_ciphers[i];
1976 /* usual IV format */
1978 /* data unit index within the file */
1979 __le64 data_unit_index;
1981 /* per-file nonce; only set in DIRECT_KEY mode */
1982 u8 nonce[FILE_NONCE_SIZE];
1984 /* IV format for IV_INO_LBLK_* modes */
1987 * IV_INO_LBLK_64: data unit index within the file
1988 * IV_INO_LBLK_32: hashed inode number + data unit index within
1989 * the file, mod 2^32
1991 __le32 data_unit_index32;
1993 /* IV_INO_LBLK_64: inode number */
1994 __le32 inode_number;
1996 /* Any extra bytes up to the algorithm's IV size must be zeroed */
1997 u8 bytes[MAX_IV_SIZE];
2000 static void crypt_loop(const struct fscrypt_cipher *cipher, const u8 *key,
2001 union fscrypt_iv *iv, bool decrypting,
2002 size_t data_unit_size, size_t padding,
2003 bool is_data_unit_index_32bit)
2005 u8 *buf = xmalloc(data_unit_size);
2008 while ((res = xread(STDIN_FILENO, buf, data_unit_size)) > 0) {
2009 size_t crypt_len = data_unit_size;
2012 crypt_len = MAX(res, cipher->min_input_size);
2013 crypt_len = ROUND_UP(crypt_len, padding);
2014 crypt_len = MIN(crypt_len, data_unit_size);
2016 ASSERT(crypt_len >= res);
2017 memset(&buf[res], 0, crypt_len - res);
2020 cipher->decrypt(key, iv->bytes, buf, buf, crypt_len);
2022 cipher->encrypt(key, iv->bytes, buf, buf, crypt_len);
2024 full_write(STDOUT_FILENO, buf, crypt_len);
2026 if (is_data_unit_index_32bit)
2027 iv->data_unit_index32 = cpu_to_le32(
2028 le32_to_cpu(iv->data_unit_index32) + 1);
2030 iv->data_unit_index = cpu_to_le64(
2031 le64_to_cpu(iv->data_unit_index) + 1);
/* The supported key derivation functions */
enum kdf_algorithm {
	KDF_NONE,		/* use the master key directly */
	KDF_AES_128_ECB,	/* legacy v1 per-file key derivation */
	KDF_HKDF_SHA512,	/* v2 policy key derivation */
};
2043 static enum kdf_algorithm parse_kdf_algorithm(const char *arg)
2045 if (strcmp(arg, "none") == 0)
2047 if (strcmp(arg, "AES-128-ECB") == 0)
2048 return KDF_AES_128_ECB;
2049 if (strcmp(arg, "HKDF-SHA512") == 0)
2050 return KDF_HKDF_SHA512;
2051 die("Unknown KDF: %s", arg);
2054 static u8 parse_mode_number(const char *arg)
2057 long num = strtol(arg, &tmp, 10);
2059 if (num <= 0 || *tmp || (u8)num != num)
2060 die("Invalid mode number: %s", arg);
2064 struct key_and_iv_params {
2065 u8 master_key[MAX_KEY_SIZE];
2066 int master_key_size;
2067 enum kdf_algorithm kdf;
2069 u8 file_nonce[FILE_NONCE_SIZE];
2070 bool file_nonce_specified;
2072 bool iv_ino_lblk_64;
2073 bool iv_ino_lblk_32;
2074 u64 data_unit_index;
2076 u8 fs_uuid[UUID_SIZE];
2077 bool fs_uuid_specified;
2080 #define HKDF_CONTEXT_KEY_IDENTIFIER 1
2081 #define HKDF_CONTEXT_PER_FILE_ENC_KEY 2
2082 #define HKDF_CONTEXT_DIRECT_KEY 3
2083 #define HKDF_CONTEXT_IV_INO_LBLK_64_KEY 4
2084 #define HKDF_CONTEXT_DIRHASH_KEY 5
2085 #define HKDF_CONTEXT_IV_INO_LBLK_32_KEY 6
2086 #define HKDF_CONTEXT_INODE_HASH_KEY 7
2088 /* Hash the file's inode number using SipHash keyed by a derived key */
2089 static u32 hash_inode_number(const struct key_and_iv_params *params)
2091 u8 info[9] = "fscrypt";
2097 info[8] = HKDF_CONTEXT_INODE_HASH_KEY;
2099 if (params->kdf != KDF_HKDF_SHA512)
2100 die("--iv-ino-lblk-32 requires --kdf=HKDF-SHA512");
2101 hkdf_sha512(params->master_key, params->master_key_size,
2102 NULL, 0, info, sizeof(info),
2103 hash_key.bytes, sizeof(hash_key));
2105 hash_key.words[0] = get_unaligned_le64(&hash_key.bytes[0]);
2106 hash_key.words[1] = get_unaligned_le64(&hash_key.bytes[8]);
2108 return (u32)siphash_1u64(hash_key.words, params->inode_number);
2111 static void derive_real_key(const struct key_and_iv_params *params,
2112 u8 *real_key, size_t real_key_size)
2114 struct aes_key aes_key;
2115 u8 info[8 + 1 + 1 + UUID_SIZE] = "fscrypt";
2119 ASSERT(real_key_size <= params->master_key_size);
2121 switch (params->kdf) {
2123 memcpy(real_key, params->master_key, real_key_size);
2125 case KDF_AES_128_ECB:
2126 if (!params->file_nonce_specified)
2127 die("--kdf=AES-128-ECB requires --file-nonce");
2128 STATIC_ASSERT(FILE_NONCE_SIZE == AES_128_KEY_SIZE);
2129 ASSERT(real_key_size % AES_BLOCK_SIZE == 0);
2130 aes_setkey(&aes_key, params->file_nonce, AES_128_KEY_SIZE);
2131 for (i = 0; i < real_key_size; i += AES_BLOCK_SIZE)
2132 aes_encrypt(&aes_key, ¶ms->master_key[i],
2135 case KDF_HKDF_SHA512:
2136 if (params->direct_key) {
2137 if (params->mode_num == 0)
2138 die("--direct-key with KDF requires --mode-num");
2139 info[infolen++] = HKDF_CONTEXT_DIRECT_KEY;
2140 info[infolen++] = params->mode_num;
2141 } else if (params->iv_ino_lblk_64) {
2142 if (params->mode_num == 0)
2143 die("--iv-ino-lblk-64 with KDF requires --mode-num");
2144 if (!params->fs_uuid_specified)
2145 die("--iv-ino-lblk-64 with KDF requires --fs-uuid");
2146 info[infolen++] = HKDF_CONTEXT_IV_INO_LBLK_64_KEY;
2147 info[infolen++] = params->mode_num;
2148 memcpy(&info[infolen], params->fs_uuid, UUID_SIZE);
2149 infolen += UUID_SIZE;
2150 } else if (params->iv_ino_lblk_32) {
2151 if (params->mode_num == 0)
2152 die("--iv-ino-lblk-32 with KDF requires --mode-num");
2153 if (!params->fs_uuid_specified)
2154 die("--iv-ino-lblk-32 with KDF requires --fs-uuid");
2155 info[infolen++] = HKDF_CONTEXT_IV_INO_LBLK_32_KEY;
2156 info[infolen++] = params->mode_num;
2157 memcpy(&info[infolen], params->fs_uuid, UUID_SIZE);
2158 infolen += UUID_SIZE;
2160 if (!params->file_nonce_specified)
2161 die("--kdf=HKDF-SHA512 requires --file-nonce or --iv-ino-lblk-{64,32}");
2162 info[infolen++] = HKDF_CONTEXT_PER_FILE_ENC_KEY;
2163 memcpy(&info[infolen], params->file_nonce,
2165 infolen += FILE_NONCE_SIZE;
2167 hkdf_sha512(params->master_key, params->master_key_size,
2168 NULL, 0, info, infolen, real_key, real_key_size);
2175 static void generate_iv(const struct key_and_iv_params *params,
2176 union fscrypt_iv *iv)
2178 memset(iv, 0, sizeof(*iv));
2179 if (params->direct_key) {
2180 if (!params->file_nonce_specified)
2181 die("--direct-key requires --file-nonce");
2182 iv->data_unit_index = cpu_to_le64(params->data_unit_index);
2183 memcpy(iv->nonce, params->file_nonce, FILE_NONCE_SIZE);
2184 } else if (params->iv_ino_lblk_64) {
2185 if (params->data_unit_index > UINT32_MAX)
2186 die("iv-ino-lblk-64 can't use --data-unit-index > UINT32_MAX");
2187 if (params->inode_number == 0)
2188 die("iv-ino-lblk-64 requires --inode-number");
2189 if (params->inode_number > UINT32_MAX)
2190 die("iv-ino-lblk-64 can't use --inode-number > UINT32_MAX");
2191 iv->data_unit_index32 = cpu_to_le32(params->data_unit_index);
2192 iv->inode_number = cpu_to_le32(params->inode_number);
2193 } else if (params->iv_ino_lblk_32) {
2194 if (params->data_unit_index > UINT32_MAX)
2195 die("iv-ino-lblk-32 can't use --data-unit-index > UINT32_MAX");
2196 if (params->inode_number == 0)
2197 die("iv-ino-lblk-32 requires --inode-number");
2198 iv->data_unit_index32 = cpu_to_le32(hash_inode_number(params) +
2199 params->data_unit_index);
2201 iv->data_unit_index = cpu_to_le64(params->data_unit_index);
2206 * Get the key and starting IV with which the encryption will actually be done.
2207 * If a KDF was specified, then a subkey is derived from the master key.
2208 * Otherwise, the master key is used directly.
2210 static void get_key_and_iv(const struct key_and_iv_params *params,
2211 u8 *real_key, size_t real_key_size,
2212 union fscrypt_iv *iv)
2216 iv_methods += params->direct_key;
2217 iv_methods += params->iv_ino_lblk_64;
2218 iv_methods += params->iv_ino_lblk_32;
2220 die("Conflicting IV methods specified");
2221 if (iv_methods > 0 && params->kdf == KDF_AES_128_ECB)
2222 die("--kdf=AES-128-ECB is incompatible with IV method options");
2224 derive_real_key(params, real_key, real_key_size);
2226 generate_iv(params, iv);
2229 static void do_dump_key_identifier(const struct key_and_iv_params *params)
2231 u8 info[9] = "fscrypt";
2232 u8 key_identifier[16];
2235 info[8] = HKDF_CONTEXT_KEY_IDENTIFIER;
2237 if (params->kdf != KDF_HKDF_SHA512)
2238 die("--dump-key-identifier requires --kdf=HKDF-SHA512");
2239 hkdf_sha512(params->master_key, params->master_key_size,
2240 NULL, 0, info, sizeof(info),
2241 key_identifier, sizeof(key_identifier));
2243 for (i = 0; i < sizeof(key_identifier); i++)
2244 printf("%02x", key_identifier[i]);
2247 static void parse_master_key(const char *arg, struct key_and_iv_params *params)
2249 params->master_key_size = hex2bin(arg, params->master_key,
2251 if (params->master_key_size < 0)
2252 die("Invalid master_key: %s", arg);
/*
 * Values returned by getopt_long() for the long options.  One member per
 * entry in longopts[]; there are no short-option equivalents.
 */
enum {
	OPT_DATA_UNIT_INDEX,
	OPT_DATA_UNIT_SIZE,
	OPT_DECRYPT,
	OPT_DIRECT_KEY,
	OPT_DUMP_KEY_IDENTIFIER,
	OPT_FILE_NONCE,
	OPT_FS_UUID,
	OPT_HELP,
	OPT_INODE_NUMBER,
	OPT_IV_INO_LBLK_32,
	OPT_IV_INO_LBLK_64,
	OPT_KDF,
	OPT_MODE_NUM,
	OPT_PADDING,
};
2272 static const struct option longopts[] = {
2273 { "data-unit-index", required_argument, NULL, OPT_DATA_UNIT_INDEX },
2274 { "data-unit-size", required_argument, NULL, OPT_DATA_UNIT_SIZE },
2275 { "decrypt", no_argument, NULL, OPT_DECRYPT },
2276 { "direct-key", no_argument, NULL, OPT_DIRECT_KEY },
2277 { "dump-key-identifier", no_argument, NULL, OPT_DUMP_KEY_IDENTIFIER },
2278 { "file-nonce", required_argument, NULL, OPT_FILE_NONCE },
2279 { "fs-uuid", required_argument, NULL, OPT_FS_UUID },
2280 { "help", no_argument, NULL, OPT_HELP },
2281 { "inode-number", required_argument, NULL, OPT_INODE_NUMBER },
2282 { "iv-ino-lblk-32", no_argument, NULL, OPT_IV_INO_LBLK_32 },
2283 { "iv-ino-lblk-64", no_argument, NULL, OPT_IV_INO_LBLK_64 },
2284 { "kdf", required_argument, NULL, OPT_KDF },
2285 { "mode-num", required_argument, NULL, OPT_MODE_NUM },
2286 { "padding", required_argument, NULL, OPT_PADDING },
2287 { NULL, 0, NULL, 0 },
2290 int main(int argc, char *argv[])
2292 size_t data_unit_size = 4096;
2293 bool decrypting = false;
2294 bool dump_key_identifier = false;
2295 struct key_and_iv_params params;
2297 const struct fscrypt_cipher *cipher;
2298 u8 real_key[MAX_KEY_SIZE];
2299 union fscrypt_iv iv;
2303 memset(¶ms, 0, sizeof(params));
2307 #ifdef ENABLE_ALG_TESTS
2312 test_aes_256_cts_cbc();
2314 test_aes_256_hctr2();
2317 while ((c = getopt_long(argc, argv, "", longopts, NULL)) != -1) {
2319 case OPT_DATA_UNIT_INDEX:
2321 params.data_unit_index = strtoull(optarg, &tmp, 10);
2323 die("Invalid data unit index: %s", optarg);
2325 case OPT_DATA_UNIT_SIZE:
2327 data_unit_size = strtoul(optarg, &tmp, 10);
2328 if (data_unit_size <= 0 || *tmp || errno)
2329 die("Invalid data unit size: %s", optarg);
2334 case OPT_DIRECT_KEY:
2335 params.direct_key = true;
2337 case OPT_DUMP_KEY_IDENTIFIER:
2338 dump_key_identifier = true;
2340 case OPT_FILE_NONCE:
2341 if (hex2bin(optarg, params.file_nonce, FILE_NONCE_SIZE)
2343 die("Invalid file nonce: %s", optarg);
2344 params.file_nonce_specified = true;
2347 if (hex2bin(optarg, params.fs_uuid, UUID_SIZE)
2349 die("Invalid filesystem UUID: %s", optarg);
2350 params.fs_uuid_specified = true;
2355 case OPT_INODE_NUMBER:
2357 params.inode_number = strtoull(optarg, &tmp, 10);
2358 if (params.inode_number <= 0 || *tmp || errno)
2359 die("Invalid inode number: %s", optarg);
2361 case OPT_IV_INO_LBLK_32:
2362 params.iv_ino_lblk_32 = true;
2364 case OPT_IV_INO_LBLK_64:
2365 params.iv_ino_lblk_64 = true;
2368 params.kdf = parse_kdf_algorithm(optarg);
2371 params.mode_num = parse_mode_number(optarg);
2374 padding = strtoul(optarg, &tmp, 10);
2375 if (padding <= 0 || *tmp || !is_power_of_2(padding) ||
2377 die("Invalid padding amount: %s", optarg);
2387 if (dump_key_identifier) {
2392 parse_master_key(argv[0], ¶ms);
2393 do_dump_key_identifier(¶ms);
2401 cipher = find_fscrypt_cipher(argv[0]);
2403 die("Unknown cipher: %s", argv[0]);
2405 if (data_unit_size < cipher->min_input_size)
2406 die("Data unit size of %zu bytes is too small for cipher %s",
2407 data_unit_size, cipher->name);
2409 parse_master_key(argv[1], ¶ms);
2411 if (params.master_key_size < cipher->keysize)
2412 die("Master key is too short for cipher %s", cipher->name);
2414 get_key_and_iv(¶ms, real_key, cipher->keysize, &iv);
2416 crypt_loop(cipher, real_key, &iv, decrypting, data_unit_size, padding,
2417 params.iv_ino_lblk_64 || params.iv_ino_lblk_32);