From: Matthew N. Heler Date: Fri, 6 Feb 2026 13:51:28 +0000 (-0600) Subject: rgw: add GCM hardware acceleration support via CryptoAccel X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=6b2064a9bb23d2ccdcf85be3c11893402057be44;p=ceph-ci.git rgw: add GCM hardware acceleration support via CryptoAccel Extend the CryptoAccel plugin system to support AES-256-GCM encryption, following the same pattern established for CBC. The CryptoAccel base class now includes GCM constants (12-byte nonce, 16-byte tag) and pure virtual methods for gcm_encrypt, gcm_decrypt, and their batch variants. All derived classes must implement these methods, maintaining consistency with how CBC is handled. OpenSSL serves as the fallback when ISA-L is unavailable, using the EVP API with proper AAD handling. QAT stubs return false since GCM requires different session setup than CBC; a note has been added to the QAT acceleration documentation clarifying this limitation. The RGW integration follows the CBC pattern closely. The previous gcm_encrypt_chunk and gcm_decrypt_chunk functions have been unified into gcm_transform() with two overloads: one for EVP-only operation and one that uses the accelerator exclusively when available, falling back to EVP only when no accelerator can be loaded. Static assertions ensure the nonce and tag sizes stay consistent between the acceleration layer and RGW. Co-Authored-By: Claude Opus 4.5 Signed-off-by: Matthew N. Heler --- diff --git a/doc/radosgw/qat-accel.rst b/doc/radosgw/qat-accel.rst index 3d6a8e81db0..81e24eb9d1e 100644 --- a/doc/radosgw/qat-accel.rst +++ b/doc/radosgw/qat-accel.rst @@ -63,6 +63,11 @@ which means that Ceph will not be able to utilize QAT hardware features for crypto operations based on the OpenSSL crypto plugin. As a result, one QAT plugin based on native QAT API is added into the crypto framework. +.. note:: QAT acceleration currently supports only AES-256-CBC encryption mode. 
+ The AES-256-GCM encryption mode (see :ref:`radosgw-encryption`) uses ISA-L + or OpenSSL acceleration instead. QAT support for GCM may be added in a + future release. + 2. QAT Support for Compression As mentioned above, QAT support for compression is based on the QATzip library diff --git a/src/crypto/crypto_accel.h b/src/crypto/crypto_accel.h index f2ba61906b4..37d77b9e4aa 100644 --- a/src/crypto/crypto_accel.h +++ b/src/crypto/crypto_accel.h @@ -30,6 +30,14 @@ class CryptoAccel { static const int AES_256_IVSIZE = 128/8; static const int AES_256_KEYSIZE = 256/8; + + /** + * GCM constants (nonce size is 12 bytes; distinct from CBC's 16-byte IV). + * NIST SP 800-38D recommends 96-bit (12-byte) IVs for GCM. + */ + static const int AES_GCM_NONCE_SIZE = 96/8; // 12 bytes + static const int AES_GCM_TAGSIZE = 16; // 128-bit auth tag + virtual bool cbc_encrypt(unsigned char* out, const unsigned char* in, size_t size, const unsigned char (&iv)[AES_256_IVSIZE], const unsigned char (&key)[AES_256_KEYSIZE], @@ -46,5 +54,30 @@ class CryptoAccel { const unsigned char iv[][AES_256_IVSIZE], const unsigned char (&key)[AES_256_KEYSIZE], optional_yield y) = 0; + + virtual bool gcm_encrypt(unsigned char* out, const unsigned char* in, size_t size, + const unsigned char (&iv)[AES_GCM_NONCE_SIZE], + const unsigned char (&key)[AES_256_KEYSIZE], + const unsigned char* aad, size_t aad_len, + unsigned char (&tag)[AES_GCM_TAGSIZE], + optional_yield y) = 0; + virtual bool gcm_decrypt(unsigned char* out, const unsigned char* in, size_t size, + const unsigned char (&iv)[AES_GCM_NONCE_SIZE], + const unsigned char (&key)[AES_256_KEYSIZE], + const unsigned char* aad, size_t aad_len, + const unsigned char (&tag)[AES_GCM_TAGSIZE], + optional_yield y) = 0; + virtual bool gcm_encrypt_batch(unsigned char* out, const unsigned char* in, size_t size, + const unsigned char iv[][AES_GCM_NONCE_SIZE], + const unsigned char (&key)[AES_256_KEYSIZE], + const unsigned char* const aad[], const size_t aad_len[], + 
unsigned char tag[][AES_GCM_TAGSIZE],
+                         optional_yield y) = 0;
+  virtual bool gcm_decrypt_batch(unsigned char* out, const unsigned char* in, size_t size,
+                         const unsigned char iv[][AES_GCM_NONCE_SIZE],
+                         const unsigned char (&key)[AES_256_KEYSIZE],
+                         const unsigned char* const aad[], const size_t aad_len[],
+                         const unsigned char tag[][AES_GCM_TAGSIZE],
+                         optional_yield y) = 0;
 };
 #endif
diff --git a/src/crypto/isa-l/isal_crypto_accel.cc b/src/crypto/isa-l/isal_crypto_accel.cc
index a22cd2c4fa0..f44ed228af0 100644
--- a/src/crypto/isa-l/isal_crypto_accel.cc
+++ b/src/crypto/isa-l/isal_crypto_accel.cc
@@ -15,6 +15,10 @@
 #include "crypto/isa-l/isal_crypto_accel.h"
 #include "crypto/isa-l/isa-l_crypto/include/aes_cbc.h"
+#include "crypto/isa-l/isa-l_crypto/include/aes_gcm.h"
+#include "include/compat.h" // for ceph_memzero_s
+#include <cstring>
+#include <cstdint>
 
 bool ISALCryptoAccel::cbc_encrypt(unsigned char* out, const unsigned char* in, size_t size,
                              const unsigned char (&iv)[AES_256_IVSIZE],
@@ -43,3 +47,117 @@ bool ISALCryptoAccel::cbc_decrypt(unsigned char* out, const unsigned char* in, s
   aes_cbc_dec_256(const_cast<unsigned char*>(in), const_cast<unsigned char*>(&iv[0]),
                   keys_blk.dec_keys, out, size);
   return true;
 }
+
+/**
+ * Constant-time byte comparison to prevent timing attacks on tag verification.
+ * Always compares all bytes regardless of differences found.
+ */
+static inline bool ct_memeq(const unsigned char* a, const unsigned char* b, size_t len)
+{
+  volatile unsigned char diff = 0;
+  for (size_t i = 0; i < len; ++i) {
+    diff |= static_cast<unsigned char>(a[i] ^ b[i]);
+  }
+  return diff == 0;
+}
+
+/**
+ * Thread-local GCM key cache to avoid re-running aes_gcm_pre_256() for
+ * repeated keys. Key material is securely wiped on key change and thread exit.
+ */
+static inline const gcm_key_data* get_cached_gcm_key(const unsigned char* key)
+{
+  struct gcm_key_cache_t {
+    bool valid = false;
+    unsigned char last_key[CryptoAccel::AES_256_KEYSIZE];
+    alignas(16) gcm_key_data cached_gkey;
+
+    void purge() {
+      if (valid) {
+        ceph_memzero_s(last_key, sizeof(last_key), sizeof(last_key));
+        ceph_memzero_s(&cached_gkey, sizeof(cached_gkey), sizeof(cached_gkey));
+        valid = false;
+      }
+    }
+
+    ~gcm_key_cache_t() { purge(); }
+  };
+
+  static thread_local gcm_key_cache_t cache;
+
+  if (!cache.valid || !ct_memeq(cache.last_key, key, CryptoAccel::AES_256_KEYSIZE)) {
+    cache.purge();
+    aes_gcm_pre_256(key, &cache.cached_gkey);
+    memcpy(cache.last_key, key, CryptoAccel::AES_256_KEYSIZE);
+    cache.valid = true;
+  }
+
+  return &cache.cached_gkey;
+}
+
+bool ISALCryptoAccel::gcm_encrypt(unsigned char* out, const unsigned char* in, size_t size,
+                                  const unsigned char (&iv)[AES_GCM_NONCE_SIZE],
+                                  const unsigned char (&key)[AES_256_KEYSIZE],
+                                  const unsigned char* aad, size_t aad_len,
+                                  unsigned char (&tag)[AES_GCM_TAGSIZE],
+                                  optional_yield y)
+{
+  if (!out || !in) {
+    return false;
+  }
+
+  const gcm_key_data* gkey = get_cached_gcm_key(&key[0]);
+  alignas(16) struct gcm_context_data gctx;
+
+  // Copy IV (ISA-L may modify it internally)
+  uint8_t iv_copy[AES_GCM_NONCE_SIZE];
+  memcpy(iv_copy, &iv[0], AES_GCM_NONCE_SIZE);
+
+  aes_gcm_enc_256(gkey, &gctx,
+                  reinterpret_cast<uint8_t*>(out),
+                  reinterpret_cast<const uint8_t*>(in),
+                  static_cast<uint64_t>(size),
+                  iv_copy,
+                  reinterpret_cast<const uint8_t*>(aad),
+                  static_cast<uint64_t>(aad_len),
+                  &tag[0], AES_GCM_TAGSIZE);
+
+  return true;
+}
+
+bool ISALCryptoAccel::gcm_decrypt(unsigned char* out, const unsigned char* in, size_t size,
+                                  const unsigned char (&iv)[AES_GCM_NONCE_SIZE],
+                                  const unsigned char (&key)[AES_256_KEYSIZE],
+                                  const unsigned char* aad, size_t aad_len,
+                                  const unsigned char (&tag)[AES_GCM_TAGSIZE],
+                                  optional_yield y)
+{
+  if (!out || !in) {
+    return false;
+  }
+
+  const gcm_key_data* gkey = get_cached_gcm_key(&key[0]);
+  alignas(16)
struct gcm_context_data gctx;
+
+  uint8_t iv_copy[AES_GCM_NONCE_SIZE];
+  memcpy(iv_copy, &iv[0], AES_GCM_NONCE_SIZE);
+
+  // Decrypt and compute tag
+  unsigned char computed_tag[AES_GCM_TAGSIZE];
+  aes_gcm_dec_256(gkey, &gctx,
+                  reinterpret_cast<uint8_t*>(out),
+                  reinterpret_cast<const uint8_t*>(in),
+                  static_cast<uint64_t>(size),
+                  iv_copy,
+                  reinterpret_cast<const uint8_t*>(aad),
+                  static_cast<uint64_t>(aad_len),
+                  computed_tag, AES_GCM_TAGSIZE);
+
+  // Constant-time tag comparison
+  if (!ct_memeq(computed_tag, &tag[0], AES_GCM_TAGSIZE)) {
+    memset(out, 0, size); // Clear output on auth failure
+    return false;
+  }
+
+  return true;
+}
diff --git a/src/crypto/isa-l/isal_crypto_accel.h b/src/crypto/isa-l/isal_crypto_accel.h
index 7fffd5122bc..b2ce33a3fd7 100644
--- a/src/crypto/isa-l/isal_crypto_accel.h
+++ b/src/crypto/isa-l/isal_crypto_accel.h
@@ -38,5 +38,33 @@ class ISALCryptoAccel : public CryptoAccel {
                  const unsigned char iv[][AES_256_IVSIZE],
                  const unsigned char (&key)[AES_256_KEYSIZE],
                  optional_yield y) override { return false; }
+
+  bool gcm_encrypt(unsigned char* out, const unsigned char* in, size_t size,
+                   const unsigned char (&iv)[AES_GCM_NONCE_SIZE],
+                   const unsigned char (&key)[AES_256_KEYSIZE],
+                   const unsigned char* aad, size_t aad_len,
+                   unsigned char (&tag)[AES_GCM_TAGSIZE],
+                   optional_yield y) override;
+
+  bool gcm_decrypt(unsigned char* out, const unsigned char* in, size_t size,
+                   const unsigned char (&iv)[AES_GCM_NONCE_SIZE],
+                   const unsigned char (&key)[AES_256_KEYSIZE],
+                   const unsigned char* aad, size_t aad_len,
+                   const unsigned char (&tag)[AES_GCM_TAGSIZE],
+                   optional_yield y) override;
+
+  bool gcm_encrypt_batch(unsigned char* out, const unsigned char* in, size_t size,
+                         const unsigned char iv[][AES_GCM_NONCE_SIZE],
+                         const unsigned char (&key)[AES_256_KEYSIZE],
+                         const unsigned char* const aad[], const size_t aad_len[],
+                         unsigned char tag[][AES_GCM_TAGSIZE],
+                         optional_yield y) override { return false; }
+
+  bool gcm_decrypt_batch(unsigned char* out, const unsigned char* in, size_t size,
+                         const
unsigned char iv[][AES_GCM_NONCE_SIZE],
+                         const unsigned char (&key)[AES_256_KEYSIZE],
+                         const unsigned char* const aad[], const size_t aad_len[],
+                         const unsigned char tag[][AES_GCM_TAGSIZE],
+                         optional_yield y) override { return false; }
 };
 #endif
diff --git a/src/crypto/openssl/openssl_crypto_accel.cc b/src/crypto/openssl/openssl_crypto_accel.cc
index 4944205dd3f..fd564ba885c 100644
--- a/src/crypto/openssl/openssl_crypto_accel.cc
+++ b/src/crypto/openssl/openssl_crypto_accel.cc
@@ -103,3 +103,128 @@ bool OpenSSLCryptoAccel::cbc_decrypt(unsigned char* out, const unsigned char* in
                  nullptr, // Hardware acceleration engine can be used in the future
                  EVP_aes_256_cbc(), AES_DECRYPT);
 }
+
+bool OpenSSLCryptoAccel::gcm_encrypt(unsigned char* out, const unsigned char* in, size_t size,
+                                     const unsigned char (&iv)[AES_GCM_NONCE_SIZE],
+                                     const unsigned char (&key)[AES_256_KEYSIZE],
+                                     const unsigned char* aad, size_t aad_len,
+                                     unsigned char (&tag)[AES_GCM_TAGSIZE],
+                                     optional_yield y)
+{
+  using pctx_t = std::unique_ptr<EVP_CIPHER_CTX, decltype(&EVP_CIPHER_CTX_free)>;
+  pctx_t pctx{ EVP_CIPHER_CTX_new(), EVP_CIPHER_CTX_free };
+
+  if (!pctx) {
+    derr << "failed to create evp cipher context for GCM encrypt" << dendl;
+    return false;
+  }
+
+  int outlen;
+
+  if (EVP_EncryptInit_ex(pctx.get(), EVP_aes_256_gcm(), nullptr, nullptr, nullptr) != EVP_SUCCESS) {
+    derr << "EVP_EncryptInit_ex failed for GCM" << dendl;
+    return false;
+  }
+
+  if (EVP_CIPHER_CTX_ctrl(pctx.get(), EVP_CTRL_GCM_SET_IVLEN, AES_GCM_NONCE_SIZE, nullptr) != EVP_SUCCESS) {
+    derr << "failed to set GCM IV length" << dendl;
+    return false;
+  }
+
+  if (EVP_EncryptInit_ex(pctx.get(), nullptr, nullptr, &key[0], &iv[0]) != EVP_SUCCESS) {
+    derr << "failed to set GCM key/IV" << dendl;
+    return false;
+  }
+
+  // Add AAD (pass nullptr for out to process AAD only)
+  if (aad_len > 0) {
+    if (EVP_EncryptUpdate(pctx.get(), nullptr, &outlen, aad,
+                          static_cast<int>(aad_len)) != EVP_SUCCESS) {
+      derr << "failed to set GCM AAD" << dendl;
+      return false;
+    }
+  }
+
+  if (EVP_EncryptUpdate(pctx.get(), out, &outlen, in,
+                        static_cast<int>(size)) != EVP_SUCCESS) {
+    derr << "EVP_EncryptUpdate failed for GCM" << dendl;
+    return false;
+  }
+
+  int final_len;
+  if (EVP_EncryptFinal_ex(pctx.get(), out + outlen, &final_len) != EVP_SUCCESS) {
+    derr << "EVP_EncryptFinal_ex failed for GCM" << dendl;
+    return false;
+  }
+
+  if (EVP_CIPHER_CTX_ctrl(pctx.get(), EVP_CTRL_GCM_GET_TAG, AES_GCM_TAGSIZE, &tag[0]) != EVP_SUCCESS) {
+    derr << "failed to get GCM tag" << dendl;
+    return false;
+  }
+
+  return true;
+}
+
+bool OpenSSLCryptoAccel::gcm_decrypt(unsigned char* out, const unsigned char* in, size_t size,
+                                     const unsigned char (&iv)[AES_GCM_NONCE_SIZE],
+                                     const unsigned char (&key)[AES_256_KEYSIZE],
+                                     const unsigned char* aad, size_t aad_len,
+                                     const unsigned char (&tag)[AES_GCM_TAGSIZE],
+                                     optional_yield y)
+{
+  using pctx_t = std::unique_ptr<EVP_CIPHER_CTX, decltype(&EVP_CIPHER_CTX_free)>;
+  pctx_t pctx{ EVP_CIPHER_CTX_new(), EVP_CIPHER_CTX_free };
+
+  if (!pctx) {
+    derr << "failed to create evp cipher context for GCM decrypt" << dendl;
+    return false;
+  }
+
+  int outlen;
+
+  if (EVP_DecryptInit_ex(pctx.get(), EVP_aes_256_gcm(), nullptr, nullptr, nullptr) != EVP_SUCCESS) {
+    derr << "EVP_DecryptInit_ex failed for GCM" << dendl;
+    return false;
+  }
+
+  if (EVP_CIPHER_CTX_ctrl(pctx.get(), EVP_CTRL_GCM_SET_IVLEN, AES_GCM_NONCE_SIZE, nullptr) != EVP_SUCCESS) {
+    derr << "failed to set GCM IV length" << dendl;
+    return false;
+  }
+
+  if (EVP_DecryptInit_ex(pctx.get(), nullptr, nullptr, &key[0], &iv[0]) != EVP_SUCCESS) {
+    derr << "failed to set GCM key/IV" << dendl;
+    return false;
+  }
+
+  if (EVP_CIPHER_CTX_ctrl(pctx.get(), EVP_CTRL_GCM_SET_TAG, AES_GCM_TAGSIZE,
+                          const_cast<unsigned char*>(&tag[0])) != EVP_SUCCESS) {
+    derr << "failed to set GCM expected tag" << dendl;
+    return false;
+  }
+
+  // Add AAD (must match encryption)
+  if (aad_len > 0) {
+    if (EVP_DecryptUpdate(pctx.get(), nullptr, &outlen, aad,
+                          static_cast<int>(aad_len)) != EVP_SUCCESS) {
+      derr << "failed to set GCM AAD" << dendl;
+      return false;
+    }
+  }
+
+  if (EVP_DecryptUpdate(pctx.get(), out, &outlen, in,
+                        static_cast<int>(size)) != EVP_SUCCESS) {
+    derr << "EVP_DecryptUpdate failed for GCM" << dendl;
+    return false;
+  }
+
+  int final_len;
+  if (EVP_DecryptFinal_ex(pctx.get(), out + outlen, &final_len) != EVP_SUCCESS) {
+    // Authentication failure - tag mismatch
+    derr << "GCM authentication failed - tag mismatch" << dendl;
+    memset(out, 0, size); // Clear output on auth failure
+    return false;
+  }
+
+  return true;
+}
diff --git a/src/crypto/openssl/openssl_crypto_accel.h b/src/crypto/openssl/openssl_crypto_accel.h
index 90edf1ec6ec..7a7e1a82706 100644
--- a/src/crypto/openssl/openssl_crypto_accel.h
+++ b/src/crypto/openssl/openssl_crypto_accel.h
@@ -39,5 +39,33 @@ class OpenSSLCryptoAccel : public CryptoAccel {
                  const unsigned char iv[][AES_256_IVSIZE],
                  const unsigned char (&key)[AES_256_KEYSIZE],
                  optional_yield y) override { return false; }
+
+  bool gcm_encrypt(unsigned char* out, const unsigned char* in, size_t size,
+                   const unsigned char (&iv)[AES_GCM_NONCE_SIZE],
+                   const unsigned char (&key)[AES_256_KEYSIZE],
+                   const unsigned char* aad, size_t aad_len,
+                   unsigned char (&tag)[AES_GCM_TAGSIZE],
+                   optional_yield y) override;
+
+  bool gcm_decrypt(unsigned char* out, const unsigned char* in, size_t size,
+                   const unsigned char (&iv)[AES_GCM_NONCE_SIZE],
+                   const unsigned char (&key)[AES_256_KEYSIZE],
+                   const unsigned char* aad, size_t aad_len,
+                   const unsigned char (&tag)[AES_GCM_TAGSIZE],
+                   optional_yield y) override;
+
+  bool gcm_encrypt_batch(unsigned char* out, const unsigned char* in, size_t size,
+                         const unsigned char iv[][AES_GCM_NONCE_SIZE],
+                         const unsigned char (&key)[AES_256_KEYSIZE],
+                         const unsigned char* const aad[], const size_t aad_len[],
+                         unsigned char tag[][AES_GCM_TAGSIZE],
+                         optional_yield y) override { return false; }
+
+  bool gcm_decrypt_batch(unsigned char* out, const unsigned char* in, size_t size,
+                         const unsigned char iv[][AES_GCM_NONCE_SIZE],
+                         const unsigned char (&key)[AES_256_KEYSIZE],
+ const unsigned char* const aad[], const size_t aad_len[], + const unsigned char tag[][AES_GCM_TAGSIZE], + optional_yield y) override { return false; } }; #endif diff --git a/src/crypto/qat/qat_crypto_accel.h b/src/crypto/qat/qat_crypto_accel.h index 714575799a9..e2815448d4f 100644 --- a/src/crypto/qat/qat_crypto_accel.h +++ b/src/crypto/qat/qat_crypto_accel.h @@ -42,5 +42,30 @@ class QccCryptoAccel : public CryptoAccel { const unsigned char iv[][AES_256_IVSIZE], const unsigned char (&key)[AES_256_KEYSIZE], optional_yield y) override; + + bool gcm_encrypt(unsigned char* out, const unsigned char* in, size_t size, + const unsigned char (&iv)[AES_GCM_NONCE_SIZE], + const unsigned char (&key)[AES_256_KEYSIZE], + const unsigned char* aad, size_t aad_len, + unsigned char (&tag)[AES_GCM_TAGSIZE], + optional_yield y) override { return false; } + bool gcm_decrypt(unsigned char* out, const unsigned char* in, size_t size, + const unsigned char (&iv)[AES_GCM_NONCE_SIZE], + const unsigned char (&key)[AES_256_KEYSIZE], + const unsigned char* aad, size_t aad_len, + const unsigned char (&tag)[AES_GCM_TAGSIZE], + optional_yield y) override { return false; } + bool gcm_encrypt_batch(unsigned char* out, const unsigned char* in, size_t size, + const unsigned char iv[][AES_GCM_NONCE_SIZE], + const unsigned char (&key)[AES_256_KEYSIZE], + const unsigned char* const aad[], const size_t aad_len[], + unsigned char tag[][AES_GCM_TAGSIZE], + optional_yield y) override { return false; } + bool gcm_decrypt_batch(unsigned char* out, const unsigned char* in, size_t size, + const unsigned char iv[][AES_GCM_NONCE_SIZE], + const unsigned char (&key)[AES_256_KEYSIZE], + const unsigned char* const aad[], const size_t aad_len[], + const unsigned char tag[][AES_GCM_TAGSIZE], + optional_yield y) override { return false; } }; #endif diff --git a/src/rgw/driver/rados/rgw_rados.cc b/src/rgw/driver/rados/rgw_rados.cc index 1aa2c086cd8..c4e273bdbd4 100644 --- a/src/rgw/driver/rados/rgw_rados.cc +++ 
b/src/rgw/driver/rados/rgw_rados.cc @@ -5116,11 +5116,19 @@ int RGWRados::copy_obj(RGWObjectCtx& src_obj_ctx, src_attrs.erase(RGW_ATTR_OBJ_REPLICATION_STATUS); /** - * Drop encryption attributes - will be generated by copy_obj_data() if - * encryption is requested. CRYPT_ORIGINAL_SIZE and CRYPT_PARTS are preserved - * for size calculations. CRYPT_PART_NUMS must be erased because copy writes - * data as a single stream (part 0), so stale part numbers from a multipart - * source would cause wrong key derivation. + * Drop encryption key-related attributes — they will be regenerated by + * copy_obj_data() if encryption is requested. + * + * CRYPT_ORIGINAL_SIZE and CRYPT_PARTS are intentionally preserved here. + * In the zero-copy path (!copy_data), the object's raw bytes are not + * rewritten, so the encryption state (ciphertext layout, sizes) is + * unchanged and these size attributes remain valid. In the copy_data + * path, they are erased separately once the data is rewritten as a + * single stream. + * + * CRYPT_PART_NUMS must be erased because copy writes data as a single + * stream (part 0), so stale part numbers from a multipart source would + * cause wrong key derivation. */ src_attrs.erase(RGW_ATTR_CRYPT_KEYSEL); src_attrs.erase(RGW_ATTR_CRYPT_CONTEXT); @@ -7099,7 +7107,8 @@ int RGWRados::get_obj_state_impl(const DoutPrefixProvider *dpp, RGWObjectCtx *oc } /** - * For AEAD encryption: adjust accounted_size to original size. + * For AEAD encryption: adjust accounted_size to original plaintext size. + * This ensures Content-Length headers and range requests use plaintext size. * Helpers return false for non-AEAD modes (including CBC), so this is a no-op * outside of AEAD. 
* Must be after manifest handling since manifest->get_obj_size() returns diff --git a/src/rgw/rgw_crypt.cc b/src/rgw/rgw_crypt.cc index bce48082ee3..61a92a1e851 100644 --- a/src/rgw/rgw_crypt.cc +++ b/src/rgw/rgw_crypt.cc @@ -29,6 +29,15 @@ #define dout_context g_ceph_context #define dout_subsys ceph_subsys_rgw +/** + * Ensure CryptoAccel GCM constants match RGW GCM constants. + * Prevents silent IV/tag size mismatches between acceleration layer and RGW. + */ +static_assert(CryptoAccel::AES_GCM_NONCE_SIZE == AES_256_GCM_NONCE_SIZE, + "CryptoAccel and RGW GCM nonce sizes must match"); +static_assert(CryptoAccel::AES_GCM_TAGSIZE == AEAD_TAG_SIZE, + "CryptoAccel and RGW GCM tag sizes must match"); + using namespace std; using namespace rgw; @@ -898,13 +907,15 @@ public: aad[7] = chunk_index & 0xFF; } - bool gcm_encrypt_chunk(unsigned char* out, - const unsigned char* in, - size_t size, - const unsigned char (&iv)[AES_256_IVSIZE], - const unsigned char (&key)[AES_256_KEYSIZE], - unsigned char* tag, - uint64_t chunk_index) + // GCM transform using OpenSSL EVP (no acceleration) + bool gcm_transform(unsigned char* out, + const unsigned char* in, + size_t size, + const unsigned char (&iv)[AES_256_IVSIZE], + const unsigned char (&key)[AES_256_KEYSIZE], + unsigned char* tag, + uint64_t chunk_index, + bool encrypt) { using pctx_t = std::unique_ptr; pctx_t pctx{ EVP_CIPHER_CTX_new(), EVP_CIPHER_CTX_free }; @@ -914,122 +925,135 @@ public: return false; } - // 1st init: set cipher type - if (1 != EVP_EncryptInit_ex(pctx.get(), EVP_aes_256_gcm(), - nullptr, nullptr, nullptr)) { - ldpp_dout(dpp, 5) << "EVP: failed to initialize GCM" << dendl; - return false; - } + uint8_t aad[8]; + encode_chunk_aad(aad, chunk_index); - // Verify IV size (should be 12 bytes for GCM) - if (EVP_CIPHER_CTX_iv_length(pctx.get()) != AES_256_IVSIZE) { - ldpp_dout(dpp, 5) << "EVP: unexpected IV length " - << EVP_CIPHER_CTX_iv_length(pctx.get()) - << " expected " << AES_256_IVSIZE << dendl; - return false; 
- } + if (encrypt) { + if (1 != EVP_EncryptInit_ex(pctx.get(), EVP_aes_256_gcm(), + nullptr, nullptr, nullptr)) { + ldpp_dout(dpp, 5) << "EVP: failed to initialize GCM" << dendl; + return false; + } - // 2nd init: set key and IV - if (1 != EVP_EncryptInit_ex(pctx.get(), nullptr, nullptr, key, iv)) { - ldpp_dout(dpp, 5) << "EVP: failed to set key/IV" << dendl; - return false; - } + if (1 != EVP_EncryptInit_ex(pctx.get(), nullptr, nullptr, key, iv)) { + ldpp_dout(dpp, 5) << "EVP: failed to set key/IV" << dendl; + return false; + } - // Add AAD for chunk ordering protection - uint8_t aad[8]; - encode_chunk_aad(aad, chunk_index); - int aad_len = 0; - if (1 != EVP_EncryptUpdate(pctx.get(), nullptr, &aad_len, aad, sizeof(aad))) { - ldpp_dout(dpp, 5) << "EVP: failed to set AAD" << dendl; - return false; - } + int aad_len = 0; + if (1 != EVP_EncryptUpdate(pctx.get(), nullptr, &aad_len, aad, sizeof(aad))) { + ldpp_dout(dpp, 5) << "EVP: failed to set AAD" << dendl; + return false; + } - // Encrypt data (size is at most CHUNK_SIZE, well within int range for EVP API) - int written = 0; - ceph_assert(size <= CHUNK_SIZE); - if (1 != EVP_EncryptUpdate(pctx.get(), out, &written, in, size)) { - ldpp_dout(dpp, 5) << "EVP: EncryptUpdate failed" << dendl; - return false; - } + int written = 0; + ceph_assert(size <= CHUNK_SIZE); + if (1 != EVP_EncryptUpdate(pctx.get(), out, &written, in, size)) { + ldpp_dout(dpp, 5) << "EVP: EncryptUpdate failed" << dendl; + return false; + } - // Finalize (GCM doesn't add padding, so finally_written should be 0) - int finally_written = 0; - if (1 != EVP_EncryptFinal_ex(pctx.get(), out + written, &finally_written)) { - ldpp_dout(dpp, 5) << "EVP: EncryptFinal_ex failed" << dendl; - return false; - } + int finally_written = 0; + if (1 != EVP_EncryptFinal_ex(pctx.get(), out + written, &finally_written)) { + ldpp_dout(dpp, 5) << "EVP: EncryptFinal_ex failed" << dendl; + return false; + } - // Get authentication tag - if (1 != 
EVP_CIPHER_CTX_ctrl(pctx.get(), EVP_CTRL_GCM_GET_TAG, - GCM_TAG_SIZE, tag)) { - ldpp_dout(dpp, 5) << "EVP: failed to get GCM tag" << dendl; - return false; - } + if (1 != EVP_CIPHER_CTX_ctrl(pctx.get(), EVP_CTRL_GCM_GET_TAG, + GCM_TAG_SIZE, tag)) { + ldpp_dout(dpp, 5) << "EVP: failed to get GCM tag" << dendl; + return false; + } - return (written + finally_written) == static_cast(size); - } + return (written + finally_written) == static_cast(size); + } else { + if (1 != EVP_DecryptInit_ex(pctx.get(), EVP_aes_256_gcm(), + nullptr, nullptr, nullptr)) { + ldpp_dout(dpp, 5) << "EVP: failed to initialize GCM" << dendl; + return false; + } - bool gcm_decrypt_chunk(unsigned char* out, - const unsigned char* in, - size_t size, - const unsigned char (&iv)[AES_256_IVSIZE], - const unsigned char (&key)[AES_256_KEYSIZE], - const unsigned char* tag, - uint64_t chunk_index) - { - using pctx_t = std::unique_ptr; - pctx_t pctx{ EVP_CIPHER_CTX_new(), EVP_CIPHER_CTX_free }; + if (1 != EVP_DecryptInit_ex(pctx.get(), nullptr, nullptr, key, iv)) { + ldpp_dout(dpp, 5) << "EVP: failed to set key/IV" << dendl; + return false; + } - if (!pctx) { - ldpp_dout(dpp, 5) << "EVP: failed to create cipher context" << dendl; - return false; - } + int aad_len = 0; + if (1 != EVP_DecryptUpdate(pctx.get(), nullptr, &aad_len, aad, sizeof(aad))) { + ldpp_dout(dpp, 5) << "EVP: failed to set AAD" << dendl; + return false; + } - // 1st init: set cipher type - if (1 != EVP_DecryptInit_ex(pctx.get(), EVP_aes_256_gcm(), - nullptr, nullptr, nullptr)) { - ldpp_dout(dpp, 5) << "EVP: failed to initialize GCM" << dendl; - return false; - } + int written = 0; + ceph_assert(size <= CHUNK_SIZE); + if (1 != EVP_DecryptUpdate(pctx.get(), out, &written, in, size)) { + ldpp_dout(dpp, 5) << "EVP: DecryptUpdate failed" << dendl; + return false; + } - // 2nd init: set key and IV - if (1 != EVP_DecryptInit_ex(pctx.get(), nullptr, nullptr, key, iv)) { - ldpp_dout(dpp, 5) << "EVP: failed to set key/IV" << dendl; - return 
false; - } + if (1 != EVP_CIPHER_CTX_ctrl(pctx.get(), EVP_CTRL_GCM_SET_TAG, + GCM_TAG_SIZE, const_cast(tag))) { + ldpp_dout(dpp, 5) << "EVP: failed to set GCM tag" << dendl; + return false; + } - // Add AAD for chunk ordering protection (must match encryption) - uint8_t aad[8]; - encode_chunk_aad(aad, chunk_index); - int aad_len = 0; - if (1 != EVP_DecryptUpdate(pctx.get(), nullptr, &aad_len, aad, sizeof(aad))) { - ldpp_dout(dpp, 5) << "EVP: failed to set AAD" << dendl; - return false; - } + int finally_written = 0; + if (1 != EVP_DecryptFinal_ex(pctx.get(), out + written, &finally_written)) { + ldpp_dout(dpp, 5) << "EVP: DecryptFinal_ex failed - authentication failure" << dendl; + memset(out, 0, size); + return false; + } - // Decrypt data (size is at most CHUNK_SIZE, well within int range for EVP API) - int written = 0; - ceph_assert(size <= CHUNK_SIZE); - if (1 != EVP_DecryptUpdate(pctx.get(), out, &written, in, size)) { - ldpp_dout(dpp, 5) << "EVP: DecryptUpdate failed" << dendl; - return false; + return (written + finally_written) == static_cast(size); } + } - // Set expected tag for verification - if (1 != EVP_CIPHER_CTX_ctrl(pctx.get(), EVP_CTRL_GCM_SET_TAG, - GCM_TAG_SIZE, const_cast(tag))) { - ldpp_dout(dpp, 5) << "EVP: failed to set GCM tag" << dendl; - return false; + /** + * GCM transform with hardware acceleration support. + * + * When a hardware accelerator is available, use it exclusively. If the + * accelerated operation fails, that indicates a real crypto error (e.g., + * authentication tag mismatch on decrypt). Only fall back to the OpenSSL + * EVP path when no accelerator is available. 
+ */ + bool gcm_transform(unsigned char* out, + const unsigned char* in, + size_t size, + const unsigned char (&iv)[AES_256_IVSIZE], + const unsigned char (&key)[AES_256_KEYSIZE], + unsigned char* tag, + uint64_t chunk_index, + bool encrypt, + optional_yield y) + { + static std::atomic failed_to_get_crypto_gcm(false); + CryptoAccelRef crypto_accel; + if (!failed_to_get_crypto_gcm.load()) { + static size_t max_requests = g_ceph_context->_conf->rgw_thread_pool_size; + crypto_accel = get_crypto_accel(dpp, cct, CHUNK_SIZE, max_requests); + if (!crypto_accel) + failed_to_get_crypto_gcm = true; } - // Finalize - this verifies the tag - int finally_written = 0; - if (1 != EVP_DecryptFinal_ex(pctx.get(), out + written, &finally_written)) { - ldpp_dout(dpp, 5) << "EVP: DecryptFinal_ex failed - authentication failure" << dendl; - return false; // Tag verification failed + if (crypto_accel != nullptr) { + uint8_t aad[8]; + encode_chunk_aad(aad, chunk_index); + unsigned char tag_buf[GCM_TAG_SIZE]; + + if (encrypt) { + bool result = crypto_accel->gcm_encrypt(out, in, size, iv, key, + aad, sizeof(aad), tag_buf, y); + if (result) memcpy(tag, tag_buf, GCM_TAG_SIZE); + return result; + } else { + memcpy(tag_buf, tag, GCM_TAG_SIZE); + return crypto_accel->gcm_decrypt(out, in, size, iv, key, + aad, sizeof(aad), tag_buf, y); + } } - return (written + finally_written) == static_cast(size); + // No hardware accelerator available — use OpenSSL EVP fallback + return gcm_transform(out, in, size, iv, key, tag, chunk_index, encrypt); } bool encrypt(bufferlist& input, @@ -1067,8 +1091,8 @@ public: unsigned char* ciphertext = buf_raw + out_pos; unsigned char* tag = buf_raw + out_pos + CHUNK_SIZE; - if (!gcm_encrypt_chunk(ciphertext, input_raw + offset, CHUNK_SIZE, - iv, key, tag, chunk_index)) { + if (!gcm_transform(ciphertext, input_raw + offset, CHUNK_SIZE, + iv, key, tag, chunk_index, true, y)) { ldpp_dout(dpp, 5) << "Failed to encrypt chunk at offset " << offset << dendl; return false; } 
@@ -1087,8 +1111,8 @@ public: unsigned char* ciphertext = buf_raw + out_pos; unsigned char* tag = buf_raw + out_pos + remainder; - if (!gcm_encrypt_chunk(ciphertext, input_raw + num_full_chunks * CHUNK_SIZE, - remainder, iv, key, tag, chunk_index)) { + if (!gcm_transform(ciphertext, input_raw + num_full_chunks * CHUNK_SIZE, + remainder, iv, key, tag, chunk_index, true, y)) { ldpp_dout(dpp, 5) << "Failed to encrypt final chunk" << dendl; return false; } @@ -1142,8 +1166,8 @@ public: unsigned char* ciphertext = input_raw + in_pos; unsigned char* tag = input_raw + in_pos + CHUNK_SIZE; - if (!gcm_decrypt_chunk(buf_raw + out_pos, ciphertext, CHUNK_SIZE, - iv, key, tag, chunk_index)) { + if (!gcm_transform(buf_raw + out_pos, ciphertext, CHUNK_SIZE, + iv, key, tag, chunk_index, false, y)) { ldpp_dout(dpp, 5) << "GCM: Failed to decrypt chunk " << i << " - authentication failed" << dendl; return false; @@ -1165,8 +1189,8 @@ public: unsigned char* ciphertext = input_raw + in_pos; unsigned char* tag = input_raw + in_pos + plaintext_size; - if (!gcm_decrypt_chunk(buf_raw + out_pos, ciphertext, plaintext_size, - iv, key, tag, chunk_index)) { + if (!gcm_transform(buf_raw + out_pos, ciphertext, plaintext_size, + iv, key, tag, chunk_index, false, y)) { ldpp_dout(dpp, 5) << "GCM: Failed to decrypt final chunk - authentication failed" << dendl; return false; } diff --git a/src/test/rgw/test_rgw_crypto.cc b/src/test/rgw/test_rgw_crypto.cc index 58a2d4eaccf..d539580ba7a 100644 --- a/src/test/rgw/test_rgw_crypto.cc +++ b/src/test/rgw/test_rgw_crypto.cc @@ -1039,11 +1039,12 @@ TEST(TestRGWCrypto, verify_AES_256_GCM_tag_verification) TEST(TestRGWCrypto, verify_AES_256_GCM_nonce_uniqueness) { - // This test verifies the MinIO-style per-object random nonce mechanism: - // 1. Each GCM instance gets a unique random nonce - // 2. Decryption with wrong nonce fails - // 3. Decryption with correct nonce succeeds - + /** + * Verify per-object random nonce mechanism: + * 1. 
Each GCM instance gets a unique random nonce + * 2. Decryption with wrong nonce fails + * 3. Decryption with correct nonce succeeds + */ const NoDoutPrefix no_dpp(g_ceph_context, dout_subsys); uint8_t key[32]; for(size_t i=0;i