diff --git a/build.gradle b/build.gradle index ab7e9f57..79c75322 100644 --- a/build.gradle +++ b/build.gradle @@ -11,7 +11,7 @@ buildscript { ext { - opensearch_version = System.getProperty("opensearch.version", "3.1.0-SNAPSHOT") + opensearch_version = System.getProperty("opensearch.version", "3.2.0-SNAPSHOT") } repositories { diff --git a/src/internalClusterTest/java/org/opensearch/index/store/niofs/CryptoDirectoryIntegTestCases.java b/src/internalClusterTest/java/org/opensearch/index/store/niofs/CryptoDirectoryIntegTestCases.java index 61051a49..fa8f600b 100644 --- a/src/internalClusterTest/java/org/opensearch/index/store/niofs/CryptoDirectoryIntegTestCases.java +++ b/src/internalClusterTest/java/org/opensearch/index/store/niofs/CryptoDirectoryIntegTestCases.java @@ -4,10 +4,11 @@ */ package org.opensearch.index.store.niofs; +import static org.hamcrest.Matchers.is; + import java.util.Arrays; import java.util.Collection; -import static org.hamcrest.Matchers.is; import org.opensearch.action.admin.indices.delete.DeleteIndexRequest; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; @@ -21,7 +22,6 @@ import org.opensearch.plugins.Plugin; import org.opensearch.test.OpenSearchIntegTestCase; - public class CryptoDirectoryIntegTestCases extends OpenSearchIntegTestCase { @Override diff --git a/src/main/java/org/opensearch/index/store/CryptoDirectoryPlugin.java b/src/main/java/org/opensearch/index/store/CryptoDirectoryPlugin.java index e4f2c388..d2b02101 100644 --- a/src/main/java/org/opensearch/index/store/CryptoDirectoryPlugin.java +++ b/src/main/java/org/opensearch/index/store/CryptoDirectoryPlugin.java @@ -5,17 +5,23 @@ package org.opensearch.index.store; import java.util.Arrays; +import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.Optional; import org.opensearch.common.settings.Setting; +import org.opensearch.index.IndexModule; +import org.opensearch.index.IndexSettings; +import 
org.opensearch.index.engine.EngineFactory; +import org.opensearch.plugins.EnginePlugin; import org.opensearch.plugins.IndexStorePlugin; import org.opensearch.plugins.Plugin; /** * A plugin that enables index level encryption and decryption. */ -public class CryptoDirectoryPlugin extends Plugin implements IndexStorePlugin { +public class CryptoDirectoryPlugin extends Plugin implements IndexStorePlugin, EnginePlugin { /** * The default constructor. @@ -37,6 +43,18 @@ public List> getSettings() { */ @Override public Map getDirectoryFactories() { - return java.util.Collections.singletonMap("cryptofs", new CryptoDirectoryFactory()); + return Collections.singletonMap("cryptofs", new CryptoDirectoryFactory()); + } + + /** + * {@inheritDoc} + */ + @Override + public Optional getEngineFactory(IndexSettings indexSettings) { + // Only provide our custom engine factory for cryptofs indices + if ("cryptofs".equals(indexSettings.getValue(IndexModule.INDEX_STORE_TYPE_SETTING))) { + return Optional.of(new CryptoEngineFactory()); + } + return Optional.empty(); } } diff --git a/src/main/java/org/opensearch/index/store/CryptoEngineFactory.java b/src/main/java/org/opensearch/index/store/CryptoEngineFactory.java new file mode 100644 index 00000000..8bba01f2 --- /dev/null +++ b/src/main/java/org/opensearch/index/store/CryptoEngineFactory.java @@ -0,0 +1,82 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ +package org.opensearch.index.store; + +import java.io.IOException; +import java.nio.file.Path; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.FSDirectory; +import org.opensearch.index.engine.Engine; +import org.opensearch.index.engine.EngineConfig; +import org.opensearch.index.engine.EngineFactory; +import org.opensearch.index.engine.InternalEngine; +import org.opensearch.index.store.iv.DefaultKeyIvResolver; +import 
org.opensearch.index.store.iv.KeyIvResolver; +import org.opensearch.index.translog.CryptoTranslogFactory; + +/** + * A factory that creates engines with crypto-enabled translogs for cryptofs indices. + */ +public class CryptoEngineFactory implements EngineFactory { + + private static final Logger logger = LogManager.getLogger(CryptoEngineFactory.class); + + /** + * Default constructor. + */ + public CryptoEngineFactory() {} + + /** + * {@inheritDoc} + */ + @Override + public Engine newReadWriteEngine(EngineConfig config) { + + try { + // Create a separate KeyIvResolver for translog encryption + KeyIvResolver keyIvResolver = createTranslogKeyIvResolver(config); + + // Create the crypto translog factory using the same KeyIvResolver as the directory + CryptoTranslogFactory cryptoTranslogFactory = new CryptoTranslogFactory(keyIvResolver); + + // Create new engine config by copying all fields from existing config + // but replace the translog factory with our crypto version + EngineConfig cryptoConfig = config + .toBuilder() + .translogFactory(cryptoTranslogFactory) // <- Replace with our crypto factory + .build(); + + // Return the default engine with crypto-enabled translog + return new InternalEngine(cryptoConfig); + } catch (IOException e) { + throw new RuntimeException("Failed to create crypto engine", e); + } + } + + /** + * Create a separate KeyIvResolver for translog encryption. 
+ */ + private KeyIvResolver createTranslogKeyIvResolver(EngineConfig config) throws IOException { + // Create a separate key resolver for translog files + + // Use the translog location for key storage + Path translogPath = config.getTranslogConfig().getTranslogPath(); + Directory keyDirectory = FSDirectory.open(translogPath); + + // Create crypto directory factory to get the key provider + CryptoDirectoryFactory directoryFactory = new CryptoDirectoryFactory(); + + // Create a dedicated key resolver for translog + return new DefaultKeyIvResolver( + keyDirectory, + config.getIndexSettings().getValue(CryptoDirectoryFactory.INDEX_CRYPTO_PROVIDER_SETTING), + directoryFactory.getKeyProvider(config.getIndexSettings()) + ); + } + +} diff --git a/src/main/java/org/opensearch/index/store/cipher/AesGcmCipherFactory.java b/src/main/java/org/opensearch/index/store/cipher/AesGcmCipherFactory.java index 1abe35cc..bf902b9e 100644 --- a/src/main/java/org/opensearch/index/store/cipher/AesGcmCipherFactory.java +++ b/src/main/java/org/opensearch/index/store/cipher/AesGcmCipherFactory.java @@ -1,22 +1,19 @@ /* + * Copyright OpenSearch Contributors * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
*/ - package org.opensearch.index.store.cipher; -import javax.crypto.Cipher; -import javax.crypto.NoSuchPaddingException; -import javax.crypto.spec.GCMParameterSpec; import java.security.InvalidAlgorithmParameterException; import java.security.InvalidKeyException; import java.security.Key; import java.security.NoSuchAlgorithmException; import java.security.Provider; +import javax.crypto.Cipher; +import javax.crypto.NoSuchPaddingException; +import javax.crypto.spec.GCMParameterSpec; + public class AesGcmCipherFactory { public static final int GCM_TAG_LENGTH = 16; @@ -126,7 +123,7 @@ public static byte[] encryptWithTag(Key key, byte[] iv, byte[] input, int length System.arraycopy(iv, 0, gcmIv, 0, 12); GCMParameterSpec spec = new GCMParameterSpec(128, gcmIv); cipher.init(Cipher.ENCRYPT_MODE, key, spec); - + return cipher.doFinal(input, 0, length); } catch (Exception e) { throw new JavaCryptoException("GCM encryption with tag failed", e); @@ -153,7 +150,7 @@ public static byte[] decryptWithTag(Key key, byte[] iv, byte[] ciphertext) throw System.arraycopy(iv, 0, gcmIv, 0, 12); GCMParameterSpec spec = new GCMParameterSpec(128, gcmIv); cipher.init(Cipher.DECRYPT_MODE, key, spec); - + return cipher.doFinal(ciphertext); } catch (Exception e) { throw new JavaCryptoException("GCM decryption with tag verification failed", e); diff --git a/src/main/java/org/opensearch/index/store/cipher/OpenSslNativeCipher.java b/src/main/java/org/opensearch/index/store/cipher/OpenSslNativeCipher.java index 896d1bc2..e889e017 100644 --- a/src/main/java/org/opensearch/index/store/cipher/OpenSslNativeCipher.java +++ b/src/main/java/org/opensearch/index/store/cipher/OpenSslNativeCipher.java @@ -4,6 +4,8 @@ */ package org.opensearch.index.store.cipher; +import static org.opensearch.index.store.cipher.AesCipherFactory.computeOffsetIVForAesGcmEncrypted; + import java.lang.foreign.Arena; import java.lang.foreign.FunctionDescriptor; import java.lang.foreign.Linker; @@ -18,7 +20,6 @@ import 
org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.common.SuppressForbidden; -import static org.opensearch.index.store.cipher.AesCipherFactory.computeOffsetIVForAesGcmEncrypted; /** * Provides native bindings to OpenSSL EVP_aes_256_ctr using the Java Panama FFI. diff --git a/src/main/java/org/opensearch/index/store/niofs/CryptoOutputStreamIndexOutput.java b/src/main/java/org/opensearch/index/store/niofs/CryptoOutputStreamIndexOutput.java index 6b35dd25..3da22949 100644 --- a/src/main/java/org/opensearch/index/store/niofs/CryptoOutputStreamIndexOutput.java +++ b/src/main/java/org/opensearch/index/store/niofs/CryptoOutputStreamIndexOutput.java @@ -8,14 +8,14 @@ import java.io.IOException; import java.io.OutputStream; import java.nio.file.Path; +import java.security.Key; + +import javax.crypto.Cipher; import org.apache.lucene.store.OutputStreamIndexOutput; import org.opensearch.common.SuppressForbidden; import org.opensearch.index.store.cipher.AesGcmCipherFactory; -import javax.crypto.Cipher; -import java.security.Key; - /** * An IndexOutput implementation that encrypts data before writing using native * OpenSSL AES-CTR. 
@@ -40,7 +40,8 @@ public final class CryptoOutputStreamIndexOutput extends OutputStreamIndexOutput * @throws IOException If there is an I/O error * @throws IllegalArgumentException If key or iv lengths are invalid */ - public CryptoOutputStreamIndexOutput(String name, Path path, OutputStream os, Key key, byte[] iv, java.security.Provider provider) throws IOException { + public CryptoOutputStreamIndexOutput(String name, Path path, OutputStream os, Key key, byte[] iv, java.security.Provider provider) + throws IOException { super("FSIndexOutput(path=\"" + path + "\")", name, new EncryptedOutputStream(os, key, iv, provider), CHUNK_SIZE); } @@ -106,7 +107,8 @@ private void flushBuffer() throws IOException { private void processAndWrite(byte[] data, int offset, int length) throws IOException { try { - byte[] encrypted = org.opensearch.index.store.cipher.AesGcmCipherFactory.encryptWithoutTag(streamOffset, cipher, slice(data, offset, length), length); + byte[] encrypted = org.opensearch.index.store.cipher.AesGcmCipherFactory + .encryptWithoutTag(streamOffset, cipher, slice(data, offset, length), length); out.write(encrypted); streamOffset += length; } catch (Throwable t) { diff --git a/src/main/java/org/opensearch/index/translog/CryptoChannelFactory.java b/src/main/java/org/opensearch/index/translog/CryptoChannelFactory.java new file mode 100644 index 00000000..1bef2cf2 --- /dev/null +++ b/src/main/java/org/opensearch/index/translog/CryptoChannelFactory.java @@ -0,0 +1,58 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ +package org.opensearch.index.translog; + +import java.io.IOException; +import java.nio.channels.FileChannel; +import java.nio.file.OpenOption; +import java.nio.file.Path; +import java.util.Set; + +import org.opensearch.index.store.iv.KeyIvResolver; + +/** + * A ChannelFactory implementation that creates FileChannels with transparent + * AES-GCM encryption/decryption for translog files. 
+ * + * This factory determines whether to apply encryption based on the file extension: + * - .tlog files: Encrypted using AES-GCM with 8KB authenticated chunks + * - .ckp files: Not encrypted (checkpoint metadata) + * + * Updated to use unified KeyIvResolver (same as index files) for consistent + * key management across all encrypted components. + * + * @opensearch.internal + */ +public class CryptoChannelFactory implements ChannelFactory { + + private final KeyIvResolver keyIvResolver; + private final String translogUUID; + + /** + * Creates a new CryptoChannelFactory. + * + * @param keyIvResolver the key and IV resolver for encryption keys (unified with index files) + * @param translogUUID the translog UUID for exact header size calculation + */ + public CryptoChannelFactory(KeyIvResolver keyIvResolver, String translogUUID) { + if (translogUUID == null) { + throw new IllegalArgumentException("translogUUID is required for exact header size calculation"); + } + this.keyIvResolver = keyIvResolver; + this.translogUUID = translogUUID; + } + + @Override + public FileChannel open(Path path, OpenOption... 
options) throws IOException { + FileChannel baseChannel = FileChannel.open(path, options); + + if (!path.getFileName().toString().endsWith(".tlog")) { + return baseChannel; + } + + Set optionsSet = Set.of(options); + return new CryptoFileChannelWrapper(baseChannel, keyIvResolver, path, optionsSet, translogUUID); + } +} diff --git a/src/main/java/org/opensearch/index/translog/CryptoFileChannelWrapper.java b/src/main/java/org/opensearch/index/translog/CryptoFileChannelWrapper.java new file mode 100644 index 00000000..d7d2a492 --- /dev/null +++ b/src/main/java/org/opensearch/index/translog/CryptoFileChannelWrapper.java @@ -0,0 +1,283 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ +package org.opensearch.index.translog; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.MappedByteBuffer; +import java.nio.channels.ClosedChannelException; +import java.nio.channels.FileChannel; +import java.nio.channels.FileLock; +import java.nio.channels.ReadableByteChannel; +import java.nio.channels.WritableByteChannel; +import java.nio.file.OpenOption; +import java.nio.file.Path; +import java.util.Set; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.locks.ReentrantReadWriteLock; + +import org.opensearch.common.SuppressForbidden; +import org.opensearch.index.store.iv.KeyIvResolver; + +/** + * A FileChannel wrapper that provides transparent AES-GCM encryption/decryption + * for translog files using 8KB authenticated chunks. + * + * This implementation delegates chunking logic to TranslogChunkManager while + * handling FileChannel lifecycle and position tracking. + * + * File Format: + * [TranslogHeader - Unencrypted] + * [Chunk 0: ≤8KB encrypted + 16B auth tag] + * [Chunk 1: ≤8KB encrypted + 16B auth tag] + * ... 
+ * [Last Chunk: ≤8KB encrypted + 16B auth tag] + * + * @opensearch.internal + */ +@SuppressForbidden(reason = "FileChannel operations required for encrypted translog implementation") +public class CryptoFileChannelWrapper extends FileChannel { + + private final FileChannel delegate; + private final TranslogChunkManager chunkManager; + private final AtomicLong position; + private final ReentrantReadWriteLock positionLock; + private volatile boolean closed = false; + + /** + * Creates a new CryptoFileChannelWrapper that wraps the provided FileChannel. + * + * @param delegate the underlying FileChannel to wrap + * @param keyIvResolver the key and IV resolver for encryption (unified with index files) + * @param path the file path (used for logging and debugging) + * @param options the file open options (used for logging and debugging) + * @param translogUUID the translog UUID for exact header size calculation + * @throws IOException if there is an error setting up the channel + */ + public CryptoFileChannelWrapper( + FileChannel delegate, + KeyIvResolver keyIvResolver, + Path path, + Set options, + String translogUUID + ) + throws IOException { + this.delegate = delegate; + this.chunkManager = new TranslogChunkManager(delegate, keyIvResolver, path, translogUUID); + this.position = new AtomicLong(delegate.position()); + this.positionLock = new ReentrantReadWriteLock(); + } + + @Override + public int read(ByteBuffer dst) throws IOException { + ensureOpen(); + if (dst.remaining() == 0) { + return 0; + } + + // updates channel position, needs writeLock for position update + positionLock.writeLock().lock(); + try { + long currentPosition = position.get(); + int bytesRead = readAtPosition(dst, currentPosition); + if (bytesRead > 0) { + position.addAndGet(bytesRead); + } + return bytesRead; + } finally { + positionLock.writeLock().unlock(); + } + } + + @Override + public int read(ByteBuffer dst, long position) throws IOException { + ensureOpen(); + if (dst.remaining() == 0) 
{ + return 0; + } + + // Positional read: does NOT update channel position, can use readLock for better concurrency + positionLock.readLock().lock(); + try { + return readAtPosition(dst, position); + } finally { + positionLock.readLock().unlock(); + } + } + + /** + * Internal method to read from a specific position without updating the channel position. + * This method is used by both stateful and positional read methods. + */ + private int readAtPosition(ByteBuffer dst, long position) throws IOException { + // Delegate to chunk manager for all read operations + return chunkManager.readFromChunks(dst, position); + } + + @Override + public long read(ByteBuffer[] dsts, int offset, int length) throws IOException { + ensureOpen(); + + long totalBytesRead = 0; + long currentPosition = position.get(); + + for (int i = offset; i < offset + length && i < dsts.length; i++) { + ByteBuffer dst = dsts[i]; + if (dst.remaining() > 0) { + int bytesRead = read(dst, currentPosition + totalBytesRead); + if (bytesRead <= 0) { + break; + } + totalBytesRead += bytesRead; + } + } + + if (totalBytesRead > 0) { + position.addAndGet(totalBytesRead); + } + + return totalBytesRead; + } + + @Override + public int write(ByteBuffer src) throws IOException { + long currentPosition = position.get(); + int bytesWritten = write(src, currentPosition); + if (bytesWritten > 0) { + position.addAndGet(bytesWritten); + } + return bytesWritten; + } + + @Override + public int write(ByteBuffer src, long position) throws IOException { + ensureOpen(); + if (src.remaining() == 0) { + return 0; + } + + positionLock.writeLock().lock(); + try { + // Delegate to chunk manager for all write operations + return chunkManager.writeToChunks(src, position); + } finally { + positionLock.writeLock().unlock(); + } + } + + @Override + public long write(ByteBuffer[] srcs, int offset, int length) throws IOException { + ensureOpen(); + + long totalBytesWritten = 0; + long currentPosition = position.get(); + + for (int i = 
offset; i < offset + length && i < srcs.length; i++) { + ByteBuffer src = srcs[i]; + if (src.remaining() > 0) { + int bytesWritten = write(src, currentPosition + totalBytesWritten); + if (bytesWritten <= 0) { + break; + } + totalBytesWritten += bytesWritten; + } + } + + if (totalBytesWritten > 0) { + position.addAndGet(totalBytesWritten); + } + + return totalBytesWritten; + } + + @Override + public long position() throws IOException { + ensureOpen(); + return position.get(); + } + + @Override + public FileChannel position(long newPosition) throws IOException { + ensureOpen(); + delegate.position(newPosition); + position.set(newPosition); + return this; + } + + @Override + public long size() throws IOException { + ensureOpen(); + return delegate.size(); + } + + @Override + public FileChannel truncate(long size) throws IOException { + ensureOpen(); + delegate.truncate(size); + long currentPosition = position.get(); + if (currentPosition > size) { + position.set(size); + } + return this; + } + + @Override + public void force(boolean metaData) throws IOException { + ensureOpen(); + delegate.force(metaData); + } + + @Override + public long transferTo(long position, long count, WritableByteChannel target) throws IOException { + ensureOpen(); + // Delegate to chunk manager for encrypted transfer + return chunkManager.transferFromChunks(position, count, target); + } + + @Override + public long transferFrom(ReadableByteChannel src, long position, long count) throws IOException { + ensureOpen(); + // Delegate to chunk manager for encrypted transfer + return chunkManager.transferToChunks(src, position, count); + } + + @Override + public FileLock lock(long position, long size, boolean shared) throws IOException { + ensureOpen(); + return delegate.lock(position, size, shared); + } + + @Override + public FileLock tryLock(long position, long size, boolean shared) throws IOException { + ensureOpen(); + return delegate.tryLock(position, size, shared); + } + + @Override + public 
MappedByteBuffer map(MapMode mode, long position, long size) throws IOException { + ensureOpen(); + + // For encrypted files, we cannot support memory mapping directly + // because the mapped memory would contain encrypted data + throw new UnsupportedOperationException( + "Memory mapping is not supported for encrypted translog files. " + + "Encrypted files require data to be decrypted during read operations." + ); + } + + @Override + protected void implCloseChannel() throws IOException { + if (!closed) { + closed = true; + delegate.close(); + } + } + + private void ensureOpen() throws ClosedChannelException { + if (closed || !delegate.isOpen()) { + throw new ClosedChannelException(); + } + } +} diff --git a/src/main/java/org/opensearch/index/translog/CryptoTranslog.java b/src/main/java/org/opensearch/index/translog/CryptoTranslog.java new file mode 100644 index 00000000..78fdbb2f --- /dev/null +++ b/src/main/java/org/opensearch/index/translog/CryptoTranslog.java @@ -0,0 +1,173 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ +package org.opensearch.index.translog; + +import java.io.IOException; +import java.util.function.LongConsumer; +import java.util.function.LongSupplier; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.index.store.iv.KeyIvResolver; + +/** + * A Translog implementation that provides AES-GCM encryption capabilities. + * + * This class extends LocalTranslog and injects a CryptoChannelFactory during construction + * to ensure that all translog file operations go through encrypted channels. + * + * Translog files (.tlog) are encrypted using AES-GCM with 8KB authenticated chunks. + * Each chunk includes a 16-byte authentication tag for data integrity verification. + * Checkpoint files (.ckp) remain unencrypted for performance and compatibility. 
+ * + * Uses unified KeyIvResolver (same as index files) for consistent + * key management across all encrypted components. + * + * @opensearch.internal + */ +public class CryptoTranslog extends LocalTranslog { + + private static final Logger logger = LogManager.getLogger(CryptoTranslog.class); + + private final KeyIvResolver keyIvResolver; + private final String translogUUID; + + /** + * Creates a new CryptoTranslog with AES-GCM encryption. + * + * @param config the translog configuration + * @param translogUUID the translog UUID + * @param deletionPolicy the deletion policy + * @param globalCheckpointSupplier the global checkpoint supplier + * @param primaryTermSupplier the primary term supplier + * @param persistedSequenceNumberConsumer the persisted sequence number consumer + * @param keyIvResolver the key and IV resolver for encryption (unified with index files) + * @throws IOException if translog creation fails + */ + public CryptoTranslog( + TranslogConfig config, + String translogUUID, + TranslogDeletionPolicy deletionPolicy, + LongSupplier globalCheckpointSupplier, + LongSupplier primaryTermSupplier, + LongConsumer persistedSequenceNumberConsumer, + KeyIvResolver keyIvResolver + ) + throws IOException { + + super( + config, + translogUUID, + deletionPolicy, + globalCheckpointSupplier, + primaryTermSupplier, + persistedSequenceNumberConsumer, + TranslogOperationHelper.DEFAULT, + createCryptoChannelFactory(keyIvResolver, translogUUID) + ); + + // Strict validation after super() - never allow null components + if (keyIvResolver == null || translogUUID == null) { + throw new IllegalArgumentException( + "Cannot create CryptoTranslog without keyIvResolver and translogUUID. " + + "Required for translog encryption. 
keyIvResolver=" + + keyIvResolver + + ", translogUUID=" + + translogUUID + ); + } + + // Initialize instance fields + this.keyIvResolver = keyIvResolver; + this.translogUUID = translogUUID; + + logger.info("CryptoTranslog initialized for translog: {}", translogUUID); + } + + /** + * Creates a new CryptoTranslog with AES-GCM encryption and custom TranslogOperationHelper. + * + * @param config the translog configuration + * @param translogUUID the translog UUID + * @param deletionPolicy the deletion policy + * @param globalCheckpointSupplier the global checkpoint supplier + * @param primaryTermSupplier the primary term supplier + * @param persistedSequenceNumberConsumer the persisted sequence number consumer + * @param translogOperationHelper the translog operation helper + * @param keyIvResolver the key and IV resolver for encryption (unified with index files) + * @throws IOException if translog creation fails + */ + public CryptoTranslog( + TranslogConfig config, + String translogUUID, + TranslogDeletionPolicy deletionPolicy, + LongSupplier globalCheckpointSupplier, + LongSupplier primaryTermSupplier, + LongConsumer persistedSequenceNumberConsumer, + TranslogOperationHelper translogOperationHelper, + KeyIvResolver keyIvResolver + ) + throws IOException { + + super( + config, + translogUUID, + deletionPolicy, + globalCheckpointSupplier, + primaryTermSupplier, + persistedSequenceNumberConsumer, + translogOperationHelper, + createCryptoChannelFactory(keyIvResolver, translogUUID) + ); + + // Strict validation after super() - never allow null components + if (keyIvResolver == null || translogUUID == null) { + throw new IllegalArgumentException( + "Cannot create CryptoTranslog without keyIvResolver and translogUUID. " + + "Required for translog encryption. 
keyIvResolver=" + + keyIvResolver + + ", translogUUID=" + + translogUUID + ); + } + + // Initialize instance fields + this.keyIvResolver = keyIvResolver; + this.translogUUID = translogUUID; + + logger.info("CryptoTranslog initialized for translog: {}", translogUUID); + } + + /** + * Helper method to create CryptoChannelFactory for constructor use. + * This is needed because Java requires super() to be the first statement. + * Returns ChannelFactory interface type to match LocalTranslog constructor signature. + */ + private static ChannelFactory createCryptoChannelFactory(KeyIvResolver keyIvResolver, String translogUUID) throws IOException { + try { + CryptoChannelFactory channelFactory = new CryptoChannelFactory(keyIvResolver, translogUUID); + logger.debug("CryptoChannelFactory initialized for translog: {}", translogUUID); + return channelFactory; + } catch (Exception e) { + logger.error("Failed to initialize CryptoChannelFactory: {}", e.getMessage(), e); + throw new IOException( + "Failed to initialize crypto channel factory for translog encryption. " + "Cannot proceed without encryption!", + e + ); + } + } + + /** + * Ensure proper cleanup of crypto resources. 
+ */ + @Override + public void close() throws IOException { + try { + super.close(); + } finally { + logger.debug("CryptoTranslog closed - encrypted translog files"); + } + } +} diff --git a/src/main/java/org/opensearch/index/translog/CryptoTranslogFactory.java b/src/main/java/org/opensearch/index/translog/CryptoTranslogFactory.java new file mode 100644 index 00000000..e96b6e06 --- /dev/null +++ b/src/main/java/org/opensearch/index/translog/CryptoTranslogFactory.java @@ -0,0 +1,81 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ +package org.opensearch.index.translog; + +import java.io.IOException; +import java.util.function.BooleanSupplier; +import java.util.function.LongConsumer; +import java.util.function.LongSupplier; + +import org.opensearch.index.store.iv.KeyIvResolver; + +/** + * A factory for creating crypto-enabled translogs that use unified key management. + * This factory creates translog instances that use the same KeyIvResolver as index files + * for consistent key management across all encrypted components. + */ +public class CryptoTranslogFactory implements TranslogFactory { + + private final KeyIvResolver keyIvResolver; + + /** + * Constructor for CryptoTranslogFactory. 
+ * + * @param keyIvResolver the unified key/IV resolver (same as used by index files) + */ + public CryptoTranslogFactory(KeyIvResolver keyIvResolver) { + this.keyIvResolver = keyIvResolver; + } + + @Override + public Translog newTranslog( + TranslogConfig config, + String translogUUID, + TranslogDeletionPolicy deletionPolicy, + LongSupplier globalCheckpointSupplier, + LongSupplier primaryTermSupplier, + LongConsumer persistedSequenceNumberConsumer, + BooleanSupplier startedPrimarySupplier + ) throws IOException { + + CryptoTranslog cryptoTranslog = new CryptoTranslog( + config, + translogUUID, + deletionPolicy, + globalCheckpointSupplier, + primaryTermSupplier, + persistedSequenceNumberConsumer, + keyIvResolver + ); + + return cryptoTranslog; + } + + @Override + public Translog newTranslog( + TranslogConfig config, + String translogUUID, + TranslogDeletionPolicy deletionPolicy, + LongSupplier globalCheckpointSupplier, + LongSupplier primaryTermSupplier, + LongConsumer persistedSequenceNumberConsumer, + BooleanSupplier startedPrimarySupplier, + TranslogOperationHelper translogOperationHelper + ) throws IOException { + + CryptoTranslog cryptoTranslog = new CryptoTranslog( + config, + translogUUID, + deletionPolicy, + globalCheckpointSupplier, + primaryTermSupplier, + persistedSequenceNumberConsumer, + translogOperationHelper, + keyIvResolver + ); + + return cryptoTranslog; + } +} diff --git a/src/main/java/org/opensearch/index/translog/TranslogChunkManager.java b/src/main/java/org/opensearch/index/translog/TranslogChunkManager.java new file mode 100644 index 00000000..17752020 --- /dev/null +++ b/src/main/java/org/opensearch/index/translog/TranslogChunkManager.java @@ -0,0 +1,392 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ +package org.opensearch.index.translog; + +import static org.opensearch.index.store.cipher.AesCipherFactory.computeOffsetIVForAesGcmEncrypted; + +import java.io.IOException; +import 
java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.channels.NonReadableChannelException;
import java.nio.channels.ReadableByteChannel;
import java.nio.channels.WritableByteChannel;
import java.nio.charset.StandardCharsets;
import java.nio.file.Path;
import java.security.Key;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.codecs.CodecUtil;
import org.opensearch.common.SuppressForbidden;
import org.opensearch.index.store.cipher.AesGcmCipherFactory;
import org.opensearch.index.store.iv.KeyIvResolver;

/**
 * Manages 8KB encrypted chunks for translog files using AES-GCM authentication.
 * Handles chunk positioning, encryption, decryption, and I/O operations.
 *
 * On disk every logical 8KB chunk is stored as (ciphertext + 16-byte GCM tag),
 * so a logical data position maps to disk position
 * {@code headerSize + chunkIndex * CHUNK_WITH_TAG_SIZE}. The translog header
 * itself is stored unencrypted.
 *
 * This class separates chunking logic from FileChannel delegation, making the code
 * more maintainable and testable.
 *
 * @opensearch.internal
 */
@SuppressForbidden(reason = "Channel operations required for chunk-based encryption")
public class TranslogChunkManager {

    private static final Logger logger = LogManager.getLogger(TranslogChunkManager.class);

    // GCM chunk constants
    public static final int GCM_CHUNK_SIZE = 8192; // 8KB plaintext per chunk
    public static final int GCM_TAG_SIZE = AesGcmCipherFactory.GCM_TAG_LENGTH; // 16 bytes auth tag
    public static final int CHUNK_WITH_TAG_SIZE = GCM_CHUNK_SIZE + GCM_TAG_SIZE; // 8208 bytes max on disk

    private final FileChannel delegate;
    private final KeyIvResolver keyIvResolver;
    private final Path filePath;
    private final String translogUUID;

    // Lazily computed exact header size (TranslogHeader.headerSizeInBytes() replica).
    // volatile because concurrent readers may compute it; the computation is
    // idempotent so the benign race is harmless.
    private volatile int actualHeaderSize = -1;

    /**
     * Helper class describing where a logical file position lives on disk.
     */
    public static class ChunkInfo {
        public final int chunkIndex; // Which chunk (0, 1, 2, ...)
        public final int offsetInChunk; // Position within the 8KB plaintext chunk
        public final long diskPosition; // Actual file position of chunk start

        public ChunkInfo(int chunkIndex, int offsetInChunk, long diskPosition) {
            this.chunkIndex = chunkIndex;
            this.offsetInChunk = offsetInChunk;
            this.diskPosition = diskPosition;
        }
    }

    /**
     * Creates a new TranslogChunkManager for managing encrypted chunks.
     *
     * @param delegate the underlying FileChannel for actual I/O operations
     * @param keyIvResolver the key and IV resolver for encryption operations
     * @param filePath the file path (used for logging and header-size decisions)
     * @param translogUUID the translog UUID for exact header size calculation
     * @throws IllegalArgumentException if {@code translogUUID} is null
     */
    public TranslogChunkManager(FileChannel delegate, KeyIvResolver keyIvResolver, Path filePath, String translogUUID) {
        if (translogUUID == null) {
            throw new IllegalArgumentException("translogUUID is required for exact header size calculation");
        }
        this.delegate = delegate;
        this.keyIvResolver = keyIvResolver;
        this.filePath = filePath;
        this.translogUUID = translogUUID;
    }

    /**
     * Determines the exact header size using local calculation to avoid cross-classloader access.
     * This replicates the exact same logic as TranslogHeader.headerSizeInBytes().
     *
     * @return the unencrypted header size in bytes; 0 for non-translog files (e.g. .ckp)
     */
    public int determineHeaderSize() {
        int cached = actualHeaderSize;
        if (cached > 0) {
            return cached;
        }

        String fileName = filePath.getFileName().toString();
        if (fileName.endsWith(".tlog")) {
            actualHeaderSize = calculateTranslogHeaderSize(translogUUID);
            logger.debug("Calculated exact header size: {} bytes for {} with UUID: {}", actualHeaderSize, filePath, translogUUID);
        } else {
            // Non-translog files (.ckp) don't need encryption anyway
            actualHeaderSize = 0;
            logger.debug("Non-translog file {}, header size: 0", filePath);
        }

        return actualHeaderSize;
    }

    /**
     * Local implementation of TranslogHeader.headerSizeInBytes() to avoid cross-classloader access issues.
     * This replicates the exact same calculation as the original method.
     *
     * @param translogUUID the translog UUID
     * @return the header size in bytes
     */
    private static int calculateTranslogHeaderSize(String translogUUID) {
        int uuidLength = translogUUID.getBytes(StandardCharsets.UTF_8).length;

        // Calculate header size using official TranslogHeader constants
        int size = CodecUtil.headerLength(TranslogHeader.TRANSLOG_CODEC); // Lucene codec header
        size += Integer.BYTES + uuidLength; // uuid length field + uuid bytes

        if (TranslogHeader.CURRENT_VERSION >= TranslogHeader.VERSION_PRIMARY_TERM) {
            size += Long.BYTES; // primary term
            size += Integer.BYTES; // checksum
        }

        return size;
    }

    /**
     * Maps a logical file position to chunk information.
     */
    public ChunkInfo getChunkInfo(long filePosition) {
        long dataPosition = filePosition - determineHeaderSize();
        int chunkIndex = (int) (dataPosition / GCM_CHUNK_SIZE);
        int offsetInChunk = (int) (dataPosition % GCM_CHUNK_SIZE);
        long diskPosition = determineHeaderSize() + ((long) chunkIndex * CHUNK_WITH_TAG_SIZE);
        return new ChunkInfo(chunkIndex, offsetInChunk, diskPosition);
    }

    /**
     * Checks if we can read a chunk at the given disk position.
     * Returns false for write-only channels or if the chunk doesn't exist yet.
     */
    public boolean canReadChunk(long diskPosition) {
        try {
            // Position beyond current file size means the chunk has not been written yet
            if (diskPosition >= delegate.size()) {
                return false;
            }

            // Probe readability with a zero-byte read; write-only channels throw
            ByteBuffer testBuffer = ByteBuffer.allocate(0);
            delegate.read(testBuffer, diskPosition);
            return true;

        } catch (NonReadableChannelException e) {
            // Channel is write-only
            return false;
        } catch (IOException e) {
            // Other read errors - assume can't read
            return false;
        }
    }

    /**
     * Reads and decrypts a complete chunk from disk.
     * Returns an empty array if the chunk doesn't exist or the channel is write-only.
     *
     * @param chunkIndex the zero-based chunk number
     * @return the decrypted plaintext (may be shorter than GCM_CHUNK_SIZE for the last chunk)
     * @throws IOException if reading fails or GCM authentication/decryption fails
     */
    public byte[] readAndDecryptChunk(int chunkIndex) throws IOException {
        try {
            // Calculate disk position for this chunk
            long diskPosition = determineHeaderSize() + ((long) chunkIndex * CHUNK_WITH_TAG_SIZE);

            // Check if chunk exists and we can read it
            if (!canReadChunk(diskPosition)) {
                return new byte[0]; // New chunk or write-only channel
            }

            // Read encrypted chunk + tag from disk. A positional FileChannel.read()
            // may return fewer bytes than requested, so loop until EOF or full;
            // a short read would otherwise fail GCM tag verification spuriously.
            ByteBuffer buffer = ByteBuffer.allocate(CHUNK_WITH_TAG_SIZE);
            int bytesRead = readFully(buffer, diskPosition);
            if (bytesRead <= GCM_TAG_SIZE) {
                return new byte[0]; // Empty or invalid chunk
            }

            // Extract encrypted data with tag
            byte[] encryptedWithTag = new byte[bytesRead];
            buffer.flip();
            buffer.get(encryptedWithTag);

            // Use existing key management
            Key key = keyIvResolver.getDataKey();
            byte[] baseIV = keyIvResolver.getIvBytes();

            // Per-chunk IV derived from the chunk's logical offset
            long chunkOffset = (long) chunkIndex * GCM_CHUNK_SIZE;
            byte[] chunkIV = computeOffsetIVForAesGcmEncrypted(baseIV, chunkOffset);

            // GCM decryption verifies the authentication tag
            return AesGcmCipherFactory.decryptWithTag(key, chunkIV, encryptedWithTag);

        } catch (Exception e) {
            throw new IOException("Failed to decrypt chunk " + chunkIndex, e);
        }
    }

    /**
     * Encrypts and writes a complete chunk to disk.
     *
     * NOTE(review): rewriting an existing chunk re-uses the same per-chunk IV with
     * different plaintext. AES-GCM IV reuse under one key compromises both
     * confidentiality and authenticity — confirm the overall design makes chunk
     * rewrites safe (e.g. key rotation per generation), or derive a fresh IV.
     *
     * @param chunkIndex the zero-based chunk number
     * @param plainData the plaintext to encrypt (at most GCM_CHUNK_SIZE bytes)
     * @throws IOException if encryption or the underlying write fails
     */
    public void encryptAndWriteChunk(int chunkIndex, byte[] plainData) throws IOException {
        try {
            // Use existing key management
            Key key = keyIvResolver.getDataKey();
            byte[] baseIV = keyIvResolver.getIvBytes();

            // Per-chunk IV derived from the chunk's logical offset
            long chunkOffset = (long) chunkIndex * GCM_CHUNK_SIZE;
            byte[] chunkIV = computeOffsetIVForAesGcmEncrypted(baseIV, chunkOffset);

            // GCM encryption appends the authentication tag
            byte[] encryptedWithTag = AesGcmCipherFactory.encryptWithTag(key, chunkIV, plainData, plainData.length);

            // FileChannel.write(ByteBuffer, long) may perform a partial write;
            // loop until the whole chunk (+tag) has been persisted.
            long diskPosition = determineHeaderSize() + ((long) chunkIndex * CHUNK_WITH_TAG_SIZE);
            writeFully(ByteBuffer.wrap(encryptedWithTag), diskPosition);

        } catch (Exception e) {
            throw new IOException("Failed to encrypt chunk " + chunkIndex + " in file " + filePath, e);
        }
    }

    /**
     * Reads data from encrypted chunks at the specified position.
     * Reads at most one chunk's worth of data per call; callers loop for larger reads.
     *
     * @param dst the buffer to read data into
     * @param position the logical file position to read from
     * @return the number of bytes read
     * @throws IOException if reading fails
     */
    public int readFromChunks(ByteBuffer dst, long position) throws IOException {
        if (dst.remaining() == 0) {
            return 0;
        }

        int headerSize = determineHeaderSize();

        // Header bytes are stored unencrypted; delegate directly.
        // NOTE(review): a read starting inside the header that extends past it would
        // return raw (still-encrypted) bytes for the tail — confirm callers never
        // issue a read that straddles the header boundary.
        if (position < headerSize) {
            return delegate.read(dst, position);
        }

        // Chunk-based reading for encrypted data
        ChunkInfo chunkInfo = getChunkInfo(position);

        // Read and decrypt the needed chunk
        byte[] decryptedChunk = readAndDecryptChunk(chunkInfo.chunkIndex);

        // Extract requested data from decrypted chunk
        int available = Math.max(0, decryptedChunk.length - chunkInfo.offsetInChunk);
        int toRead = Math.min(dst.remaining(), available);

        if (toRead > 0) {
            dst.put(decryptedChunk, chunkInfo.offsetInChunk, toRead);
        }

        return toRead;
    }

    /**
     * Writes data to encrypted chunks at the specified position using a
     * read-modify-write pattern. Writes at most the remainder of one chunk per
     * call; callers loop for larger writes (consistent with the FileChannel
     * contract that positional writes may be partial).
     *
     * @param src the buffer containing data to write
     * @param position the logical file position to write to
     * @return the number of bytes consumed from {@code src}
     * @throws IOException if writing fails
     */
    public int writeToChunks(ByteBuffer src, long position) throws IOException {
        if (src.remaining() == 0) {
            return 0;
        }

        int headerSize = determineHeaderSize();

        // Header writes remain unencrypted
        if (position < headerSize) {
            return delegate.write(src, position);
        }

        // Chunk-based encrypted writes
        ChunkInfo chunkInfo = getChunkInfo(position);

        // Read-modify-write: fetch the existing plaintext for this chunk (empty for a new chunk)
        byte[] existingChunk = readAndDecryptChunk(chunkInfo.chunkIndex);

        // Expand the chunk buffer if the write extends past the current plaintext
        int requiredSize = chunkInfo.offsetInChunk + src.remaining();
        byte[] modifiedChunk = existingChunk;
        if (existingChunk.length < requiredSize) {
            modifiedChunk = new byte[Math.min(requiredSize, GCM_CHUNK_SIZE)];
            System.arraycopy(existingChunk, 0, modifiedChunk, 0, existingChunk.length);
        }

        // Apply modifications, clamped to the chunk boundary
        int bytesToWrite = Math.min(src.remaining(), GCM_CHUNK_SIZE - chunkInfo.offsetInChunk);
        src.get(modifiedChunk, chunkInfo.offsetInChunk, bytesToWrite);

        // Encrypt and write back
        encryptAndWriteChunk(chunkInfo.chunkIndex, modifiedChunk);

        return bytesToWrite;
    }

    /**
     * Transfers data from encrypted chunks to a target channel, decrypting during
     * the transfer.
     *
     * @param position the starting logical position in the source
     * @param count the number of bytes to transfer
     * @param target the target channel to write to
     * @return the number of bytes transferred (may be less than {@code count} on EOF
     *         or if the target accepts a short write)
     * @throws IOException if transfer fails
     */
    public long transferFromChunks(long position, long count, WritableByteChannel target) throws IOException {
        long transferred = 0;
        long remaining = count;
        ByteBuffer buffer = ByteBuffer.allocate(GCM_CHUNK_SIZE);

        while (remaining > 0) {
            buffer.clear();
            int toRead = (int) Math.min(buffer.remaining(), remaining);
            buffer.limit(toRead);

            int bytesRead = readFromChunks(buffer, position + transferred);
            if (bytesRead <= 0) {
                break; // EOF or nothing available
            }

            buffer.flip();
            int bytesWritten = target.write(buffer);
            transferred += bytesWritten;
            remaining -= bytesWritten;

            if (bytesWritten < bytesRead) {
                break; // target accepted a short write; stop to keep accounting exact
            }
        }

        return transferred;
    }

    /**
     * Transfers data from a source channel into encrypted chunks, encrypting during
     * the transfer.
     *
     * @param src the source channel to read from
     * @param position the starting logical position in the target
     * @param count the number of bytes to transfer
     * @return the number of bytes transferred (may be less than {@code count} if the
     *         source is exhausted)
     * @throws IOException if transfer fails
     */
    public long transferToChunks(ReadableByteChannel src, long position, long count) throws IOException {
        long transferred = 0;
        long remaining = count;
        ByteBuffer buffer = ByteBuffer.allocate(GCM_CHUNK_SIZE);

        while (remaining > 0) {
            buffer.clear();
            int toRead = (int) Math.min(buffer.remaining(), remaining);
            buffer.limit(toRead);

            int bytesRead = src.read(buffer);
            if (bytesRead <= 0) {
                break; // source exhausted
            }

            buffer.flip();
            int bytesWritten = writeToChunks(buffer, position + transferred);
            transferred += bytesWritten;
            remaining -= bytesWritten;

            if (bytesWritten < bytesRead) {
                break; // chunk-boundary short write; stop to keep accounting exact
            }
        }

        return transferred;
    }

    /**
     * Reads from the delegate at the given position until the buffer is full or EOF.
     *
     * @return the total number of bytes read (0 if the position is at/after EOF)
     */
    private int readFully(ByteBuffer buffer, long position) throws IOException {
        int total = 0;
        while (buffer.hasRemaining()) {
            int n = delegate.read(buffer, position + total);
            if (n <= 0) {
                break; // EOF (or defensive stop on a zero-length read)
            }
            total += n;
        }
        return total;
    }

    /**
     * Writes the whole buffer to the delegate at the given position, looping over
     * partial writes.
     */
    private void writeFully(ByteBuffer buffer, long position) throws IOException {
        long pos = position;
        while (buffer.hasRemaining()) {
            pos += delegate.write(buffer, pos);
        }
    }
}
*/ @@ -168,39 +167,39 @@ public void testRandomAccessWithCryptoOutput() throws Exception { String fileName = "test-random-access"; int blockSize = 16; int dataSize = blockSize * 3; - + // Generate predictable random data byte[] testData = new byte[dataSize]; java.util.Random rnd = new java.util.Random(42); // Fixed seed for predictability rnd.nextBytes(testData); - + // Write data using CryptoOutput try (IndexOutput output = dir.createOutput(fileName, newIOContext(random()))) { output.writeBytes(testData, testData.length); } - + // Read randomly at different positions try (IndexInput input = dir.openInput(fileName, newIOContext(random()))) { // Test reading from start input.seek(0); assertEquals(testData[0], input.readByte()); - + // Test reading from middle of first block input.seek(8); assertEquals(testData[8], input.readByte()); - + // Test reading from start of second block input.seek(blockSize); assertEquals(testData[blockSize], input.readByte()); - + // Test reading from middle of second block input.seek(blockSize + 8); assertEquals(testData[blockSize + 8], input.readByte()); - + // Test reading from start of third block input.seek(blockSize * 2); assertEquals(testData[blockSize * 2], input.readByte()); - + // Test reading multiple bytes at random position input.seek(5); byte[] buffer = new byte[10]; diff --git a/src/test/java/org/opensearch/index/store/cipher/CipherEncryptionDecryptionTests.java b/src/test/java/org/opensearch/index/store/cipher/CipherEncryptionDecryptionTests.java index f8019945..63f4aa76 100644 --- a/src/test/java/org/opensearch/index/store/cipher/CipherEncryptionDecryptionTests.java +++ b/src/test/java/org/opensearch/index/store/cipher/CipherEncryptionDecryptionTests.java @@ -4,14 +4,15 @@ */ package org.opensearch.index.store.cipher; +import static org.junit.Assert.assertArrayEquals; + +import java.nio.charset.StandardCharsets; +import java.security.Security; +import java.util.Arrays; + import javax.crypto.Cipher; import 
javax.crypto.spec.IvParameterSpec; import javax.crypto.spec.SecretKeySpec; -import java.util.Arrays; -import java.security.Security; -import java.nio.charset.StandardCharsets; - -import static org.junit.Assert.assertArrayEquals; /** * Tests for cipher encryption and decryption operations @@ -31,52 +32,52 @@ public void testEncryptDecryptWithCTR() throws Exception { // Get cipher from pool Cipher cipher = AesCipherFactory.CIPHER_POOL.get(); SecretKeySpec keySpec = new SecretKeySpec(TEST_KEY, "AES"); - + // Encrypt cipher.init(Cipher.ENCRYPT_MODE, keySpec, new IvParameterSpec(TEST_IV)); byte[] encrypted = cipher.update(TEST_DATA); - + // Decrypt cipher.init(Cipher.DECRYPT_MODE, keySpec, new IvParameterSpec(TEST_IV)); byte[] decrypted = cipher.update(encrypted); - + assertArrayEquals(TEST_DATA, decrypted); } public void testEncryptDecryptWithGCM() throws Exception { SecretKeySpec keySpec = new SecretKeySpec(TEST_KEY, "AES"); - + // Encrypt with tag byte[] encryptedWithTag = AesGcmCipherFactory.encryptWithTag(keySpec, TEST_IV, TEST_DATA, TEST_DATA.length); - + // Decrypt with tag verification byte[] decrypted = AesGcmCipherFactory.decryptWithTag(keySpec, TEST_IV, encryptedWithTag); - + assertArrayEquals(TEST_DATA, decrypted); } public void testEncryptWithGcmAndDecryptWithCTR() throws Exception { SecretKeySpec keySpec = new SecretKeySpec(TEST_KEY, "AES"); - + // Encrypt with GCM without tag Cipher gcmCipher = AesGcmCipherFactory.getCipher(Security.getProvider("SunJCE")); AesGcmCipherFactory.initCipher(gcmCipher, keySpec, TEST_IV, Cipher.ENCRYPT_MODE, 0); byte[] gcmEncrypted = AesGcmCipherFactory.encryptWithoutTag(0, gcmCipher, TEST_DATA, TEST_DATA.length); - + // Get remaining bytes from GCM cipher byte[] remaining = AesGcmCipherFactory.finalizeAndGetTag(gcmCipher); - + // Combine encrypted data with remaining bytes (excluding tag) byte[] fullEncrypted = new byte[gcmEncrypted.length + remaining.length - AesGcmCipherFactory.GCM_TAG_LENGTH]; System.arraycopy(gcmEncrypted, 
0, fullEncrypted, 0, gcmEncrypted.length); System.arraycopy(remaining, 0, fullEncrypted, gcmEncrypted.length, remaining.length - AesGcmCipherFactory.GCM_TAG_LENGTH); - + // Decrypt with CTR using offset IV Cipher ctrCipher = AesCipherFactory.CIPHER_POOL.get(); byte[] offsetIV = AesCipherFactory.computeOffsetIVForAesGcmEncrypted(TEST_IV, 0); ctrCipher.init(Cipher.DECRYPT_MODE, keySpec, new IvParameterSpec(offsetIV)); byte[] decrypted = ctrCipher.update(fullEncrypted); - + assertArrayEquals(TEST_DATA, decrypted); } -} \ No newline at end of file +} diff --git a/src/test/java/org/opensearch/index/store/mmap/LazyDecryptedRaceConditionTests.java b/src/test/java/org/opensearch/index/store/mmap/LazyDecryptedRaceConditionTests.java index e856e9b1..68a9d764 100644 --- a/src/test/java/org/opensearch/index/store/mmap/LazyDecryptedRaceConditionTests.java +++ b/src/test/java/org/opensearch/index/store/mmap/LazyDecryptedRaceConditionTests.java @@ -4,6 +4,10 @@ */ package org.opensearch.index.store.mmap; +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + import java.lang.foreign.Arena; import java.lang.foreign.MemorySegment; import java.lang.foreign.ValueLayout; @@ -14,9 +18,6 @@ import java.util.concurrent.atomic.AtomicReference; import org.junit.After; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; import org.junit.Before; import org.junit.Test; import org.opensearch.common.SuppressForbidden; diff --git a/src/test/java/org/opensearch/index/translog/CryptoTranslogEncryptionTests.java b/src/test/java/org/opensearch/index/translog/CryptoTranslogEncryptionTests.java new file mode 100644 index 00000000..b54894da --- /dev/null +++ b/src/test/java/org/opensearch/index/translog/CryptoTranslogEncryptionTests.java @@ -0,0 +1,177 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: 
Apache-2.0 + */ +package org.opensearch.index.translog; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.FileChannel; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardOpenOption; +import java.security.Provider; +import java.security.Security; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.common.SuppressForbidden; +import org.opensearch.common.crypto.MasterKeyProvider; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.store.iv.DefaultKeyIvResolver; +import org.opensearch.index.store.iv.KeyIvResolver; +import org.opensearch.test.OpenSearchTestCase; + +/** + * Verify that translog data encryption actually works. + */ +public class CryptoTranslogEncryptionTests extends OpenSearchTestCase { + + private static final Logger logger = LogManager.getLogger(CryptoTranslogEncryptionTests.class); + + private Path tempDir; + private KeyIvResolver keyIvResolver; + private MasterKeyProvider keyProvider; + + @Override + @SuppressForbidden(reason = "Creating temp directory for test purposes") + public void setUp() throws Exception { + super.setUp(); + tempDir = Files.createTempDirectory("crypto-translog-encryption-test"); + + Settings settings = Settings.builder().put("index.store.crypto.provider", "SunJCE").put("index.store.kms.type", "test").build(); + + Provider cryptoProvider = Security.getProvider("SunJCE"); + + // Create a mock key provider for testing + keyProvider = new MasterKeyProvider() { + @Override + public java.util.Map getEncryptionContext() { + return java.util.Collections.singletonMap("test-key", "test-value"); + } + + @Override + public byte[] decryptKey(byte[] encryptedKey) { + return new byte[32]; // 256-bit key + } + + @Override + public String getKeyId() { + return "test-key-id"; + } + + @Override + public org.opensearch.common.crypto.DataKeyPair 
generateDataPair() { + byte[] rawKey = new byte[32]; + byte[] encryptedKey = new byte[32]; + return new org.opensearch.common.crypto.DataKeyPair(rawKey, encryptedKey); + } + + @Override + public void close() { + // No resources to close + } + }; + + org.apache.lucene.store.Directory directory = new org.apache.lucene.store.NIOFSDirectory(tempDir); + keyIvResolver = new DefaultKeyIvResolver(directory, cryptoProvider, keyProvider); + } + + public void testTranslogDataIsActuallyEncrypted() throws IOException { + String testTranslogUUID = "test-encryption-uuid"; + CryptoChannelFactory channelFactory = new CryptoChannelFactory(keyIvResolver, testTranslogUUID); + + Path translogPath = tempDir.resolve("test-encryption.tlog"); + + // Test data that should be encrypted + String sensitiveData = + "{\"@timestamp\": 894069207, \"clientip\":\"192.168.1.1\", \"request\": \"GET /secret/data HTTP/1.1\", \"status\": 200}"; + byte[] testData = sensitiveData.getBytes(StandardCharsets.UTF_8); + + // Write header + data using our crypto channel (with READ permission for round-trip verification) + try ( + FileChannel cryptoChannel = channelFactory + .open(translogPath, StandardOpenOption.CREATE, StandardOpenOption.READ, StandardOpenOption.WRITE) + ) { + + // First write the header + TranslogHeader header = new TranslogHeader(testTranslogUUID, 1L); + header.write(cryptoChannel, false); + int headerSize = header.sizeInBytes(); + + logger.info("Header size: {} bytes", headerSize); + + // Now write data that should be encrypted (beyond header) + ByteBuffer dataBuffer = ByteBuffer.wrap(testData); + int bytesWritten = cryptoChannel.write(dataBuffer, headerSize); + + assertEquals("Should write all test data", testData.length, bytesWritten); + } + + // CRITICAL: Read raw file content and verify data is encrypted (NOT readable) + byte[] fileContent = Files.readAllBytes(translogPath); + String fileContentString = new String(fileContent, StandardCharsets.UTF_8); + + logger.info("File size: {} 
bytes", fileContent.length); + logger.info("File content (first 200 chars): {}", fileContentString.substring(0, Math.min(200, fileContentString.length()))); + + assertFalse("Sensitive data found in plain text! File content: " + fileContentString, fileContentString.contains("192.168.1.1")); + + assertFalse("Sensitive data found in plain text! File content: " + fileContentString, fileContentString.contains("/secret/data")); + + assertFalse("JSON structure found in plain text! File content: " + fileContentString, fileContentString.contains("\"clientip\"")); + + // Verify header is still readable (should be unencrypted) + assertTrue("Header should contain translog UUID", fileContentString.contains(testTranslogUUID)); + } + + /** + * Verify read/write round trip works correctly. + */ + public void testTranslogEncryptionDecryptionRoundTrip() throws IOException { + String testTranslogUUID = "test-roundtrip-uuid"; + CryptoChannelFactory channelFactory = new CryptoChannelFactory(keyIvResolver, testTranslogUUID); + + Path translogPath = tempDir.resolve("test-roundtrip.tlog"); + + String originalData = "{\"test\": \"sensitive document data that must be encrypted\"}"; + byte[] testData = originalData.getBytes(StandardCharsets.UTF_8); + + int headerSize; + + // Write data + try (FileChannel writeChannel = channelFactory.open(translogPath, StandardOpenOption.CREATE, StandardOpenOption.WRITE)) { + // Write header + TranslogHeader header = new TranslogHeader(testTranslogUUID, 1L); + header.write(writeChannel, false); + headerSize = header.sizeInBytes(); + + // Write data beyond header + ByteBuffer writeBuffer = ByteBuffer.wrap(testData); + writeChannel.write(writeBuffer, headerSize); + } + + // Read data back + try (FileChannel readChannel = channelFactory.open(translogPath, StandardOpenOption.READ)) { + // Skip header + readChannel.position(headerSize); + + // Read encrypted data + ByteBuffer readBuffer = ByteBuffer.allocate(testData.length); + int bytesRead = 
readChannel.read(readBuffer); + + assertEquals("Should read same amount as written", testData.length, bytesRead); + + // Verify decrypted data matches original + String decryptedData = new String(readBuffer.array(), StandardCharsets.UTF_8); + assertEquals("Decrypted data should match original", originalData, decryptedData); + } + + // Verify file content is still encrypted on disk + byte[] rawFileContent = Files.readAllBytes(translogPath); + String rawContent = new String(rawFileContent, StandardCharsets.UTF_8); + + assertFalse("Data should be encrypted on disk", rawContent.contains("sensitive document data")); + } +}