crypto operations based on the OpenSSL crypto plugin. As a result, a QAT plugin
based on the native QAT API has been added to the crypto framework.
+.. note:: QAT acceleration currently supports only AES-256-CBC encryption mode.
+ The AES-256-GCM encryption mode (see :ref:`radosgw-encryption`) uses ISA-L
+ or OpenSSL acceleration instead. QAT support for GCM may be added in a
+ future release.
+
2. QAT Support for Compression
As mentioned above, QAT support for compression is based on the QATzip library
static const int AES_256_IVSIZE = 128/8;
static const int AES_256_KEYSIZE = 256/8;
+
+ /**
+ * GCM constants (nonce size is 12 bytes; distinct from CBC's 16-byte IV).
+ * NIST SP 800-38D recommends 96-bit (12-byte) IVs for GCM.
+ */
+ static const int AES_GCM_NONCE_SIZE = 96/8; // 12 bytes
+ static const int AES_GCM_TAGSIZE = 16; // 128-bit auth tag
+
virtual bool cbc_encrypt(unsigned char* out, const unsigned char* in, size_t size,
const unsigned char (&iv)[AES_256_IVSIZE],
const unsigned char (&key)[AES_256_KEYSIZE],
const unsigned char iv[][AES_256_IVSIZE],
const unsigned char (&key)[AES_256_KEYSIZE],
optional_yield y) = 0;
+
+ virtual bool gcm_encrypt(unsigned char* out, const unsigned char* in, size_t size,
+ const unsigned char (&iv)[AES_GCM_NONCE_SIZE],
+ const unsigned char (&key)[AES_256_KEYSIZE],
+ const unsigned char* aad, size_t aad_len,
+ unsigned char (&tag)[AES_GCM_TAGSIZE],
+ optional_yield y) = 0;
+ virtual bool gcm_decrypt(unsigned char* out, const unsigned char* in, size_t size,
+ const unsigned char (&iv)[AES_GCM_NONCE_SIZE],
+ const unsigned char (&key)[AES_256_KEYSIZE],
+ const unsigned char* aad, size_t aad_len,
+ const unsigned char (&tag)[AES_GCM_TAGSIZE],
+ optional_yield y) = 0;
+ virtual bool gcm_encrypt_batch(unsigned char* out, const unsigned char* in, size_t size,
+ const unsigned char iv[][AES_GCM_NONCE_SIZE],
+ const unsigned char (&key)[AES_256_KEYSIZE],
+ const unsigned char* const aad[], const size_t aad_len[],
+ unsigned char tag[][AES_GCM_TAGSIZE],
+ optional_yield y) = 0;
+ virtual bool gcm_decrypt_batch(unsigned char* out, const unsigned char* in, size_t size,
+ const unsigned char iv[][AES_GCM_NONCE_SIZE],
+ const unsigned char (&key)[AES_256_KEYSIZE],
+ const unsigned char* const aad[], const size_t aad_len[],
+ const unsigned char tag[][AES_GCM_TAGSIZE],
+ optional_yield y) = 0;
};
#endif
#include "crypto/isa-l/isal_crypto_accel.h"
#include "crypto/isa-l/isa-l_crypto/include/aes_cbc.h"
+#include "crypto/isa-l/isa-l_crypto/include/aes_gcm.h"
+#include "include/compat.h" // for ceph_memzero_s
+#include <cstdint>
+#include <cstring>
bool ISALCryptoAccel::cbc_encrypt(unsigned char* out, const unsigned char* in, size_t size,
const unsigned char (&iv)[AES_256_IVSIZE],
aes_cbc_dec_256(const_cast<unsigned char*>(in), const_cast<unsigned char*>(&iv[0]), keys_blk.dec_keys, out, size);
return true;
}
+
/**
 * Timing-safe equality check over two byte buffers.
 *
 * Examines every byte unconditionally (no early exit), so the running time
 * does not depend on where the buffers first differ. Used for GCM tag
 * verification, where a data-dependent early exit would leak timing
 * information to an attacker probing forged tags.
 */
static inline bool ct_memeq(const unsigned char* a, const unsigned char* b, size_t len)
{
  volatile unsigned char acc = 0;
  size_t i = 0;
  while (i < len) {
    acc = acc | static_cast<unsigned char>(a[i] ^ b[i]);
    ++i;
  }
  return acc == 0;
}
+
/**
 * Thread-local GCM key-schedule cache.
 *
 * aes_gcm_pre_256() (ISA-L key expansion plus hash-subkey precomputation)
 * is relatively expensive, so the expanded gcm_key_data for the most
 * recently used key is cached per thread and reused while callers keep
 * supplying the same raw key. Key material is securely wiped on key change
 * and at thread exit via the cache destructor.
 *
 * @param key raw AES-256 key, CryptoAccel::AES_256_KEYSIZE bytes
 * @return pointer to the expanded key schedule; thread-local storage, valid
 *         until the next call on this thread with a different key
 */
static inline const gcm_key_data* get_cached_gcm_key(const unsigned char* key)
{
  struct gcm_key_cache_t {
    bool valid = false;
    unsigned char last_key[CryptoAccel::AES_256_KEYSIZE];
    alignas(16) gcm_key_data cached_gkey;

    // Securely wipe both the raw key copy and the expanded schedule;
    // ceph_memzero_s cannot be optimized away by the compiler.
    void purge() {
      if (valid) {
        ceph_memzero_s(last_key, sizeof(last_key), sizeof(last_key));
        ceph_memzero_s(&cached_gkey, sizeof(cached_gkey), sizeof(cached_gkey));
        valid = false;
      }
    }

    ~gcm_key_cache_t() { purge(); }
  };

  static thread_local gcm_key_cache_t cache;

  // Constant-time key comparison so the cache lookup itself leaks no
  // timing information about how much of a probed key matches.
  if (!cache.valid || !ct_memeq(cache.last_key, key, CryptoAccel::AES_256_KEYSIZE)) {
    cache.purge();
    aes_gcm_pre_256(key, &cache.cached_gkey);
    memcpy(cache.last_key, key, CryptoAccel::AES_256_KEYSIZE);
    cache.valid = true;
  }

  return &cache.cached_gkey;
}
+
+bool ISALCryptoAccel::gcm_encrypt(unsigned char* out, const unsigned char* in, size_t size,
+ const unsigned char (&iv)[AES_GCM_NONCE_SIZE],
+ const unsigned char (&key)[AES_256_KEYSIZE],
+ const unsigned char* aad, size_t aad_len,
+ unsigned char (&tag)[AES_GCM_TAGSIZE],
+ optional_yield y)
+{
+ if (!out || !in) {
+ return false;
+ }
+
+ const gcm_key_data* gkey = get_cached_gcm_key(&key[0]);
+ alignas(16) struct gcm_context_data gctx;
+
+ // Copy IV (ISA-L may modify it internally)
+ uint8_t iv_copy[AES_GCM_NONCE_SIZE];
+ memcpy(iv_copy, &iv[0], AES_GCM_NONCE_SIZE);
+
+ aes_gcm_enc_256(gkey, &gctx,
+ reinterpret_cast<uint8_t*>(out),
+ reinterpret_cast<const uint8_t*>(in),
+ static_cast<uint64_t>(size),
+ iv_copy,
+ reinterpret_cast<const uint8_t*>(aad),
+ static_cast<uint64_t>(aad_len),
+ &tag[0], AES_GCM_TAGSIZE);
+
+ return true;
+}
+
+bool ISALCryptoAccel::gcm_decrypt(unsigned char* out, const unsigned char* in, size_t size,
+ const unsigned char (&iv)[AES_GCM_NONCE_SIZE],
+ const unsigned char (&key)[AES_256_KEYSIZE],
+ const unsigned char* aad, size_t aad_len,
+ const unsigned char (&tag)[AES_GCM_TAGSIZE],
+ optional_yield y)
+{
+ if (!out || !in) {
+ return false;
+ }
+
+ const gcm_key_data* gkey = get_cached_gcm_key(&key[0]);
+ alignas(16) struct gcm_context_data gctx;
+
+ uint8_t iv_copy[AES_GCM_NONCE_SIZE];
+ memcpy(iv_copy, &iv[0], AES_GCM_NONCE_SIZE);
+
+ // Decrypt and compute tag
+ unsigned char computed_tag[AES_GCM_TAGSIZE];
+ aes_gcm_dec_256(gkey, &gctx,
+ reinterpret_cast<uint8_t*>(out),
+ reinterpret_cast<const uint8_t*>(in),
+ static_cast<uint64_t>(size),
+ iv_copy,
+ reinterpret_cast<const uint8_t*>(aad),
+ static_cast<uint64_t>(aad_len),
+ computed_tag, AES_GCM_TAGSIZE);
+
+ // Constant-time tag comparison
+ if (!ct_memeq(computed_tag, &tag[0], AES_GCM_TAGSIZE)) {
+ memset(out, 0, size); // Clear output on auth failure
+ return false;
+ }
+
+ return true;
+}
const unsigned char iv[][AES_256_IVSIZE],
const unsigned char (&key)[AES_256_KEYSIZE],
optional_yield y) override { return false; }
+
+ bool gcm_encrypt(unsigned char* out, const unsigned char* in, size_t size,
+ const unsigned char (&iv)[AES_GCM_NONCE_SIZE],
+ const unsigned char (&key)[AES_256_KEYSIZE],
+ const unsigned char* aad, size_t aad_len,
+ unsigned char (&tag)[AES_GCM_TAGSIZE],
+ optional_yield y) override;
+
+ bool gcm_decrypt(unsigned char* out, const unsigned char* in, size_t size,
+ const unsigned char (&iv)[AES_GCM_NONCE_SIZE],
+ const unsigned char (&key)[AES_256_KEYSIZE],
+ const unsigned char* aad, size_t aad_len,
+ const unsigned char (&tag)[AES_GCM_TAGSIZE],
+ optional_yield y) override;
+
+ bool gcm_encrypt_batch(unsigned char* out, const unsigned char* in, size_t size,
+ const unsigned char iv[][AES_GCM_NONCE_SIZE],
+ const unsigned char (&key)[AES_256_KEYSIZE],
+ const unsigned char* const aad[], const size_t aad_len[],
+ unsigned char tag[][AES_GCM_TAGSIZE],
+ optional_yield y) override { return false; }
+
+ bool gcm_decrypt_batch(unsigned char* out, const unsigned char* in, size_t size,
+ const unsigned char iv[][AES_GCM_NONCE_SIZE],
+ const unsigned char (&key)[AES_256_KEYSIZE],
+ const unsigned char* const aad[], const size_t aad_len[],
+ const unsigned char tag[][AES_GCM_TAGSIZE],
+ optional_yield y) override { return false; }
};
#endif
nullptr, // Hardware acceleration engine can be used in the future
EVP_aes_256_cbc(), AES_DECRYPT);
}
+
+bool OpenSSLCryptoAccel::gcm_encrypt(unsigned char* out, const unsigned char* in, size_t size,
+ const unsigned char (&iv)[AES_GCM_NONCE_SIZE],
+ const unsigned char (&key)[AES_256_KEYSIZE],
+ const unsigned char* aad, size_t aad_len,
+ unsigned char (&tag)[AES_GCM_TAGSIZE],
+ optional_yield y)
+{
+ using pctx_t = std::unique_ptr<EVP_CIPHER_CTX, decltype(&::EVP_CIPHER_CTX_free)>;
+ pctx_t pctx{ EVP_CIPHER_CTX_new(), EVP_CIPHER_CTX_free };
+
+ if (!pctx) {
+ derr << "failed to create evp cipher context for GCM encrypt" << dendl;
+ return false;
+ }
+
+ int outlen;
+
+ if (EVP_EncryptInit_ex(pctx.get(), EVP_aes_256_gcm(), nullptr, nullptr, nullptr) != EVP_SUCCESS) {
+ derr << "EVP_EncryptInit_ex failed for GCM" << dendl;
+ return false;
+ }
+
+ if (EVP_CIPHER_CTX_ctrl(pctx.get(), EVP_CTRL_GCM_SET_IVLEN, AES_GCM_NONCE_SIZE, nullptr) != EVP_SUCCESS) {
+ derr << "failed to set GCM IV length" << dendl;
+ return false;
+ }
+
+ if (EVP_EncryptInit_ex(pctx.get(), nullptr, nullptr, &key[0], &iv[0]) != EVP_SUCCESS) {
+ derr << "failed to set GCM key/IV" << dendl;
+ return false;
+ }
+
+ // Add AAD (pass nullptr for out to process AAD only)
+ if (aad_len > 0) {
+ if (EVP_EncryptUpdate(pctx.get(), nullptr, &outlen, aad,
+ static_cast<int>(aad_len)) != EVP_SUCCESS) {
+ derr << "failed to set GCM AAD" << dendl;
+ return false;
+ }
+ }
+
+ if (EVP_EncryptUpdate(pctx.get(), out, &outlen, in,
+ static_cast<int>(size)) != EVP_SUCCESS) {
+ derr << "EVP_EncryptUpdate failed for GCM" << dendl;
+ return false;
+ }
+
+ int final_len;
+ if (EVP_EncryptFinal_ex(pctx.get(), out + outlen, &final_len) != EVP_SUCCESS) {
+ derr << "EVP_EncryptFinal_ex failed for GCM" << dendl;
+ return false;
+ }
+
+ if (EVP_CIPHER_CTX_ctrl(pctx.get(), EVP_CTRL_GCM_GET_TAG, AES_GCM_TAGSIZE, &tag[0]) != EVP_SUCCESS) {
+ derr << "failed to get GCM tag" << dendl;
+ return false;
+ }
+
+ return true;
+}
+
+bool OpenSSLCryptoAccel::gcm_decrypt(unsigned char* out, const unsigned char* in, size_t size,
+ const unsigned char (&iv)[AES_GCM_NONCE_SIZE],
+ const unsigned char (&key)[AES_256_KEYSIZE],
+ const unsigned char* aad, size_t aad_len,
+ const unsigned char (&tag)[AES_GCM_TAGSIZE],
+ optional_yield y)
+{
+ using pctx_t = std::unique_ptr<EVP_CIPHER_CTX, decltype(&::EVP_CIPHER_CTX_free)>;
+ pctx_t pctx{ EVP_CIPHER_CTX_new(), EVP_CIPHER_CTX_free };
+
+ if (!pctx) {
+ derr << "failed to create evp cipher context for GCM decrypt" << dendl;
+ return false;
+ }
+
+ int outlen;
+
+ if (EVP_DecryptInit_ex(pctx.get(), EVP_aes_256_gcm(), nullptr, nullptr, nullptr) != EVP_SUCCESS) {
+ derr << "EVP_DecryptInit_ex failed for GCM" << dendl;
+ return false;
+ }
+
+ if (EVP_CIPHER_CTX_ctrl(pctx.get(), EVP_CTRL_GCM_SET_IVLEN, AES_GCM_NONCE_SIZE, nullptr) != EVP_SUCCESS) {
+ derr << "failed to set GCM IV length" << dendl;
+ return false;
+ }
+
+ if (EVP_DecryptInit_ex(pctx.get(), nullptr, nullptr, &key[0], &iv[0]) != EVP_SUCCESS) {
+ derr << "failed to set GCM key/IV" << dendl;
+ return false;
+ }
+
+ if (EVP_CIPHER_CTX_ctrl(pctx.get(), EVP_CTRL_GCM_SET_TAG, AES_GCM_TAGSIZE,
+ const_cast<unsigned char*>(&tag[0])) != EVP_SUCCESS) {
+ derr << "failed to set GCM expected tag" << dendl;
+ return false;
+ }
+
+ // Add AAD (must match encryption)
+ if (aad_len > 0) {
+ if (EVP_DecryptUpdate(pctx.get(), nullptr, &outlen, aad,
+ static_cast<int>(aad_len)) != EVP_SUCCESS) {
+ derr << "failed to set GCM AAD" << dendl;
+ return false;
+ }
+ }
+
+ if (EVP_DecryptUpdate(pctx.get(), out, &outlen, in,
+ static_cast<int>(size)) != EVP_SUCCESS) {
+ derr << "EVP_DecryptUpdate failed for GCM" << dendl;
+ return false;
+ }
+
+ int final_len;
+ if (EVP_DecryptFinal_ex(pctx.get(), out + outlen, &final_len) != EVP_SUCCESS) {
+ // Authentication failure - tag mismatch
+ derr << "GCM authentication failed - tag mismatch" << dendl;
+ memset(out, 0, size); // Clear output on auth failure
+ return false;
+ }
+
+ return true;
+}
const unsigned char iv[][AES_256_IVSIZE],
const unsigned char (&key)[AES_256_KEYSIZE],
optional_yield y) override { return false; }
+
+ bool gcm_encrypt(unsigned char* out, const unsigned char* in, size_t size,
+ const unsigned char (&iv)[AES_GCM_NONCE_SIZE],
+ const unsigned char (&key)[AES_256_KEYSIZE],
+ const unsigned char* aad, size_t aad_len,
+ unsigned char (&tag)[AES_GCM_TAGSIZE],
+ optional_yield y) override;
+
+ bool gcm_decrypt(unsigned char* out, const unsigned char* in, size_t size,
+ const unsigned char (&iv)[AES_GCM_NONCE_SIZE],
+ const unsigned char (&key)[AES_256_KEYSIZE],
+ const unsigned char* aad, size_t aad_len,
+ const unsigned char (&tag)[AES_GCM_TAGSIZE],
+ optional_yield y) override;
+
+ bool gcm_encrypt_batch(unsigned char* out, const unsigned char* in, size_t size,
+ const unsigned char iv[][AES_GCM_NONCE_SIZE],
+ const unsigned char (&key)[AES_256_KEYSIZE],
+ const unsigned char* const aad[], const size_t aad_len[],
+ unsigned char tag[][AES_GCM_TAGSIZE],
+ optional_yield y) override { return false; }
+
+ bool gcm_decrypt_batch(unsigned char* out, const unsigned char* in, size_t size,
+ const unsigned char iv[][AES_GCM_NONCE_SIZE],
+ const unsigned char (&key)[AES_256_KEYSIZE],
+ const unsigned char* const aad[], const size_t aad_len[],
+ const unsigned char tag[][AES_GCM_TAGSIZE],
+ optional_yield y) override { return false; }
};
#endif
const unsigned char iv[][AES_256_IVSIZE],
const unsigned char (&key)[AES_256_KEYSIZE],
optional_yield y) override;
+
+ bool gcm_encrypt(unsigned char* out, const unsigned char* in, size_t size,
+ const unsigned char (&iv)[AES_GCM_NONCE_SIZE],
+ const unsigned char (&key)[AES_256_KEYSIZE],
+ const unsigned char* aad, size_t aad_len,
+ unsigned char (&tag)[AES_GCM_TAGSIZE],
+ optional_yield y) override { return false; }
+ bool gcm_decrypt(unsigned char* out, const unsigned char* in, size_t size,
+ const unsigned char (&iv)[AES_GCM_NONCE_SIZE],
+ const unsigned char (&key)[AES_256_KEYSIZE],
+ const unsigned char* aad, size_t aad_len,
+ const unsigned char (&tag)[AES_GCM_TAGSIZE],
+ optional_yield y) override { return false; }
+ bool gcm_encrypt_batch(unsigned char* out, const unsigned char* in, size_t size,
+ const unsigned char iv[][AES_GCM_NONCE_SIZE],
+ const unsigned char (&key)[AES_256_KEYSIZE],
+ const unsigned char* const aad[], const size_t aad_len[],
+ unsigned char tag[][AES_GCM_TAGSIZE],
+ optional_yield y) override { return false; }
+ bool gcm_decrypt_batch(unsigned char* out, const unsigned char* in, size_t size,
+ const unsigned char iv[][AES_GCM_NONCE_SIZE],
+ const unsigned char (&key)[AES_256_KEYSIZE],
+ const unsigned char* const aad[], const size_t aad_len[],
+ const unsigned char tag[][AES_GCM_TAGSIZE],
+ optional_yield y) override { return false; }
};
#endif
src_attrs.erase(RGW_ATTR_OBJ_REPLICATION_STATUS);
/**
- * Drop encryption attributes - will be generated by copy_obj_data() if
- * encryption is requested. CRYPT_ORIGINAL_SIZE and CRYPT_PARTS are preserved
- * for size calculations. CRYPT_PART_NUMS must be erased because copy writes
- * data as a single stream (part 0), so stale part numbers from a multipart
- * source would cause wrong key derivation.
+ * Drop encryption key-related attributes — they will be regenerated by
+ * copy_obj_data() if encryption is requested.
+ *
+ * CRYPT_ORIGINAL_SIZE and CRYPT_PARTS are intentionally preserved here.
+ * In the zero-copy path (!copy_data), the object's raw bytes are not
+ * rewritten, so the encryption state (ciphertext layout, sizes) is
+ * unchanged and these size attributes remain valid. In the copy_data
+ * path, they are erased separately once the data is rewritten as a
+ * single stream.
+ *
+ * CRYPT_PART_NUMS must be erased because copy writes data as a single
+ * stream (part 0), so stale part numbers from a multipart source would
+ * cause wrong key derivation.
*/
src_attrs.erase(RGW_ATTR_CRYPT_KEYSEL);
src_attrs.erase(RGW_ATTR_CRYPT_CONTEXT);
}
/**
- * For AEAD encryption: adjust accounted_size to original size.
+ * For AEAD encryption: adjust accounted_size to original plaintext size.
+ * This ensures Content-Length headers and range requests use plaintext size.
* Helpers return false for non-AEAD modes (including CBC), so this is a no-op
* outside of AEAD.
* Must be after manifest handling since manifest->get_obj_size() returns
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_rgw
/**
 * Compile-time guard: the CryptoAccel GCM geometry must match RGW's own
 * GCM constants. A mismatch would make the accelerated and software paths
 * disagree on nonce/tag layout and silently corrupt or reject data.
 */
static_assert(CryptoAccel::AES_GCM_NONCE_SIZE == AES_256_GCM_NONCE_SIZE,
              "CryptoAccel and RGW GCM nonce sizes must match");
static_assert(CryptoAccel::AES_GCM_TAGSIZE == AEAD_TAG_SIZE,
              "CryptoAccel and RGW GCM tag sizes must match");
+
using namespace std;
using namespace rgw;
aad[7] = chunk_index & 0xFF;
}
- bool gcm_encrypt_chunk(unsigned char* out,
- const unsigned char* in,
- size_t size,
- const unsigned char (&iv)[AES_256_IVSIZE],
- const unsigned char (&key)[AES_256_KEYSIZE],
- unsigned char* tag,
- uint64_t chunk_index)
+ // GCM transform using OpenSSL EVP (no acceleration)
+ bool gcm_transform(unsigned char* out,
+ const unsigned char* in,
+ size_t size,
+ const unsigned char (&iv)[AES_256_IVSIZE],
+ const unsigned char (&key)[AES_256_KEYSIZE],
+ unsigned char* tag,
+ uint64_t chunk_index,
+ bool encrypt)
{
using pctx_t = std::unique_ptr<EVP_CIPHER_CTX, decltype(&::EVP_CIPHER_CTX_free)>;
pctx_t pctx{ EVP_CIPHER_CTX_new(), EVP_CIPHER_CTX_free };
return false;
}
- // 1st init: set cipher type
- if (1 != EVP_EncryptInit_ex(pctx.get(), EVP_aes_256_gcm(),
- nullptr, nullptr, nullptr)) {
- ldpp_dout(dpp, 5) << "EVP: failed to initialize GCM" << dendl;
- return false;
- }
+ uint8_t aad[8];
+ encode_chunk_aad(aad, chunk_index);
- // Verify IV size (should be 12 bytes for GCM)
- if (EVP_CIPHER_CTX_iv_length(pctx.get()) != AES_256_IVSIZE) {
- ldpp_dout(dpp, 5) << "EVP: unexpected IV length "
- << EVP_CIPHER_CTX_iv_length(pctx.get())
- << " expected " << AES_256_IVSIZE << dendl;
- return false;
- }
+ if (encrypt) {
+ if (1 != EVP_EncryptInit_ex(pctx.get(), EVP_aes_256_gcm(),
+ nullptr, nullptr, nullptr)) {
+ ldpp_dout(dpp, 5) << "EVP: failed to initialize GCM" << dendl;
+ return false;
+ }
- // 2nd init: set key and IV
- if (1 != EVP_EncryptInit_ex(pctx.get(), nullptr, nullptr, key, iv)) {
- ldpp_dout(dpp, 5) << "EVP: failed to set key/IV" << dendl;
- return false;
- }
+ if (1 != EVP_EncryptInit_ex(pctx.get(), nullptr, nullptr, key, iv)) {
+ ldpp_dout(dpp, 5) << "EVP: failed to set key/IV" << dendl;
+ return false;
+ }
- // Add AAD for chunk ordering protection
- uint8_t aad[8];
- encode_chunk_aad(aad, chunk_index);
- int aad_len = 0;
- if (1 != EVP_EncryptUpdate(pctx.get(), nullptr, &aad_len, aad, sizeof(aad))) {
- ldpp_dout(dpp, 5) << "EVP: failed to set AAD" << dendl;
- return false;
- }
+ int aad_len = 0;
+ if (1 != EVP_EncryptUpdate(pctx.get(), nullptr, &aad_len, aad, sizeof(aad))) {
+ ldpp_dout(dpp, 5) << "EVP: failed to set AAD" << dendl;
+ return false;
+ }
- // Encrypt data (size is at most CHUNK_SIZE, well within int range for EVP API)
- int written = 0;
- ceph_assert(size <= CHUNK_SIZE);
- if (1 != EVP_EncryptUpdate(pctx.get(), out, &written, in, size)) {
- ldpp_dout(dpp, 5) << "EVP: EncryptUpdate failed" << dendl;
- return false;
- }
+ int written = 0;
+ ceph_assert(size <= CHUNK_SIZE);
+ if (1 != EVP_EncryptUpdate(pctx.get(), out, &written, in, size)) {
+ ldpp_dout(dpp, 5) << "EVP: EncryptUpdate failed" << dendl;
+ return false;
+ }
- // Finalize (GCM doesn't add padding, so finally_written should be 0)
- int finally_written = 0;
- if (1 != EVP_EncryptFinal_ex(pctx.get(), out + written, &finally_written)) {
- ldpp_dout(dpp, 5) << "EVP: EncryptFinal_ex failed" << dendl;
- return false;
- }
+ int finally_written = 0;
+ if (1 != EVP_EncryptFinal_ex(pctx.get(), out + written, &finally_written)) {
+ ldpp_dout(dpp, 5) << "EVP: EncryptFinal_ex failed" << dendl;
+ return false;
+ }
- // Get authentication tag
- if (1 != EVP_CIPHER_CTX_ctrl(pctx.get(), EVP_CTRL_GCM_GET_TAG,
- GCM_TAG_SIZE, tag)) {
- ldpp_dout(dpp, 5) << "EVP: failed to get GCM tag" << dendl;
- return false;
- }
+ if (1 != EVP_CIPHER_CTX_ctrl(pctx.get(), EVP_CTRL_GCM_GET_TAG,
+ GCM_TAG_SIZE, tag)) {
+ ldpp_dout(dpp, 5) << "EVP: failed to get GCM tag" << dendl;
+ return false;
+ }
- return (written + finally_written) == static_cast<int>(size);
- }
+ return (written + finally_written) == static_cast<int>(size);
+ } else {
+ if (1 != EVP_DecryptInit_ex(pctx.get(), EVP_aes_256_gcm(),
+ nullptr, nullptr, nullptr)) {
+ ldpp_dout(dpp, 5) << "EVP: failed to initialize GCM" << dendl;
+ return false;
+ }
- bool gcm_decrypt_chunk(unsigned char* out,
- const unsigned char* in,
- size_t size,
- const unsigned char (&iv)[AES_256_IVSIZE],
- const unsigned char (&key)[AES_256_KEYSIZE],
- const unsigned char* tag,
- uint64_t chunk_index)
- {
- using pctx_t = std::unique_ptr<EVP_CIPHER_CTX, decltype(&::EVP_CIPHER_CTX_free)>;
- pctx_t pctx{ EVP_CIPHER_CTX_new(), EVP_CIPHER_CTX_free };
+ if (1 != EVP_DecryptInit_ex(pctx.get(), nullptr, nullptr, key, iv)) {
+ ldpp_dout(dpp, 5) << "EVP: failed to set key/IV" << dendl;
+ return false;
+ }
- if (!pctx) {
- ldpp_dout(dpp, 5) << "EVP: failed to create cipher context" << dendl;
- return false;
- }
+ int aad_len = 0;
+ if (1 != EVP_DecryptUpdate(pctx.get(), nullptr, &aad_len, aad, sizeof(aad))) {
+ ldpp_dout(dpp, 5) << "EVP: failed to set AAD" << dendl;
+ return false;
+ }
- // 1st init: set cipher type
- if (1 != EVP_DecryptInit_ex(pctx.get(), EVP_aes_256_gcm(),
- nullptr, nullptr, nullptr)) {
- ldpp_dout(dpp, 5) << "EVP: failed to initialize GCM" << dendl;
- return false;
- }
+ int written = 0;
+ ceph_assert(size <= CHUNK_SIZE);
+ if (1 != EVP_DecryptUpdate(pctx.get(), out, &written, in, size)) {
+ ldpp_dout(dpp, 5) << "EVP: DecryptUpdate failed" << dendl;
+ return false;
+ }
- // 2nd init: set key and IV
- if (1 != EVP_DecryptInit_ex(pctx.get(), nullptr, nullptr, key, iv)) {
- ldpp_dout(dpp, 5) << "EVP: failed to set key/IV" << dendl;
- return false;
- }
+ if (1 != EVP_CIPHER_CTX_ctrl(pctx.get(), EVP_CTRL_GCM_SET_TAG,
+ GCM_TAG_SIZE, const_cast<unsigned char*>(tag))) {
+ ldpp_dout(dpp, 5) << "EVP: failed to set GCM tag" << dendl;
+ return false;
+ }
- // Add AAD for chunk ordering protection (must match encryption)
- uint8_t aad[8];
- encode_chunk_aad(aad, chunk_index);
- int aad_len = 0;
- if (1 != EVP_DecryptUpdate(pctx.get(), nullptr, &aad_len, aad, sizeof(aad))) {
- ldpp_dout(dpp, 5) << "EVP: failed to set AAD" << dendl;
- return false;
- }
+ int finally_written = 0;
+ if (1 != EVP_DecryptFinal_ex(pctx.get(), out + written, &finally_written)) {
+ ldpp_dout(dpp, 5) << "EVP: DecryptFinal_ex failed - authentication failure" << dendl;
+ memset(out, 0, size);
+ return false;
+ }
- // Decrypt data (size is at most CHUNK_SIZE, well within int range for EVP API)
- int written = 0;
- ceph_assert(size <= CHUNK_SIZE);
- if (1 != EVP_DecryptUpdate(pctx.get(), out, &written, in, size)) {
- ldpp_dout(dpp, 5) << "EVP: DecryptUpdate failed" << dendl;
- return false;
+ return (written + finally_written) == static_cast<int>(size);
}
+ }
- // Set expected tag for verification
- if (1 != EVP_CIPHER_CTX_ctrl(pctx.get(), EVP_CTRL_GCM_SET_TAG,
- GCM_TAG_SIZE, const_cast<unsigned char*>(tag))) {
- ldpp_dout(dpp, 5) << "EVP: failed to set GCM tag" << dendl;
- return false;
+ /**
+ * GCM transform with hardware acceleration support.
+ *
+ * When a hardware accelerator is available, use it exclusively. If the
+ * accelerated operation fails, that indicates a real crypto error (e.g.,
+ * authentication tag mismatch on decrypt). Only fall back to the OpenSSL
+ * EVP path when no accelerator is available.
+ */
+ bool gcm_transform(unsigned char* out,
+ const unsigned char* in,
+ size_t size,
+ const unsigned char (&iv)[AES_256_IVSIZE],
+ const unsigned char (&key)[AES_256_KEYSIZE],
+ unsigned char* tag,
+ uint64_t chunk_index,
+ bool encrypt,
+ optional_yield y)
+ {
+ static std::atomic<bool> failed_to_get_crypto_gcm(false);
+ CryptoAccelRef crypto_accel;
+ if (!failed_to_get_crypto_gcm.load()) {
+ static size_t max_requests = g_ceph_context->_conf->rgw_thread_pool_size;
+ crypto_accel = get_crypto_accel(dpp, cct, CHUNK_SIZE, max_requests);
+ if (!crypto_accel)
+ failed_to_get_crypto_gcm = true;
}
- // Finalize - this verifies the tag
- int finally_written = 0;
- if (1 != EVP_DecryptFinal_ex(pctx.get(), out + written, &finally_written)) {
- ldpp_dout(dpp, 5) << "EVP: DecryptFinal_ex failed - authentication failure" << dendl;
- return false; // Tag verification failed
+ if (crypto_accel != nullptr) {
+ uint8_t aad[8];
+ encode_chunk_aad(aad, chunk_index);
+ unsigned char tag_buf[GCM_TAG_SIZE];
+
+ if (encrypt) {
+ bool result = crypto_accel->gcm_encrypt(out, in, size, iv, key,
+ aad, sizeof(aad), tag_buf, y);
+ if (result) memcpy(tag, tag_buf, GCM_TAG_SIZE);
+ return result;
+ } else {
+ memcpy(tag_buf, tag, GCM_TAG_SIZE);
+ return crypto_accel->gcm_decrypt(out, in, size, iv, key,
+ aad, sizeof(aad), tag_buf, y);
+ }
}
- return (written + finally_written) == static_cast<int>(size);
+ // No hardware accelerator available — use OpenSSL EVP fallback
+ return gcm_transform(out, in, size, iv, key, tag, chunk_index, encrypt);
}
bool encrypt(bufferlist& input,
unsigned char* ciphertext = buf_raw + out_pos;
unsigned char* tag = buf_raw + out_pos + CHUNK_SIZE;
- if (!gcm_encrypt_chunk(ciphertext, input_raw + offset, CHUNK_SIZE,
- iv, key, tag, chunk_index)) {
+ if (!gcm_transform(ciphertext, input_raw + offset, CHUNK_SIZE,
+ iv, key, tag, chunk_index, true, y)) {
ldpp_dout(dpp, 5) << "Failed to encrypt chunk at offset " << offset << dendl;
return false;
}
unsigned char* ciphertext = buf_raw + out_pos;
unsigned char* tag = buf_raw + out_pos + remainder;
- if (!gcm_encrypt_chunk(ciphertext, input_raw + num_full_chunks * CHUNK_SIZE,
- remainder, iv, key, tag, chunk_index)) {
+ if (!gcm_transform(ciphertext, input_raw + num_full_chunks * CHUNK_SIZE,
+ remainder, iv, key, tag, chunk_index, true, y)) {
ldpp_dout(dpp, 5) << "Failed to encrypt final chunk" << dendl;
return false;
}
unsigned char* ciphertext = input_raw + in_pos;
unsigned char* tag = input_raw + in_pos + CHUNK_SIZE;
- if (!gcm_decrypt_chunk(buf_raw + out_pos, ciphertext, CHUNK_SIZE,
- iv, key, tag, chunk_index)) {
+ if (!gcm_transform(buf_raw + out_pos, ciphertext, CHUNK_SIZE,
+ iv, key, tag, chunk_index, false, y)) {
ldpp_dout(dpp, 5) << "GCM: Failed to decrypt chunk " << i
<< " - authentication failed" << dendl;
return false;
unsigned char* ciphertext = input_raw + in_pos;
unsigned char* tag = input_raw + in_pos + plaintext_size;
- if (!gcm_decrypt_chunk(buf_raw + out_pos, ciphertext, plaintext_size,
- iv, key, tag, chunk_index)) {
+ if (!gcm_transform(buf_raw + out_pos, ciphertext, plaintext_size,
+ iv, key, tag, chunk_index, false, y)) {
ldpp_dout(dpp, 5) << "GCM: Failed to decrypt final chunk - authentication failed" << dendl;
return false;
}
TEST(TestRGWCrypto, verify_AES_256_GCM_nonce_uniqueness)
{
- // This test verifies the MinIO-style per-object random nonce mechanism:
- // 1. Each GCM instance gets a unique random nonce
- // 2. Decryption with wrong nonce fails
- // 3. Decryption with correct nonce succeeds
-
+ /**
+ * Verify per-object random nonce mechanism:
+ * 1. Each GCM instance gets a unique random nonce
+ * 2. Decryption with wrong nonce fails
+ * 3. Decryption with correct nonce succeeds
+ */
const NoDoutPrefix no_dpp(g_ceph_context, dout_subsys);
uint8_t key[32];
for(size_t i=0;i<sizeof(key);i++)
TEST(TestRGWCrypto, verify_AES_256_GCM_nonce_restore)
{
- // This test simulates the encrypt/decrypt flow with stored nonce:
- // 1. Encrypt with auto-generated nonce
- // 2. Extract nonce (would be stored in RGW_ATTR_CRYPT_NONCE)
- // 3. Create new instance with restored nonce
- // 4. Decrypt successfully
-
+ /**
+ * Simulate the encrypt/decrypt flow with stored nonce:
+ * 1. Encrypt with auto-generated nonce
+ * 2. Extract nonce (would be stored in RGW_ATTR_CRYPT_NONCE)
+ * 3. Create new instance with restored nonce
+ * 4. Decrypt successfully
+ */
const NoDoutPrefix no_dpp(g_ceph_context, dout_subsys);
uint8_t key[32];
for(size_t i=0;i<sizeof(key);i++)