diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponse.java
index 88cbf8cb65d2..de490c5aa85c 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponse.java
@@ -23,15 +23,20 @@
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
 import org.apache.hadoop.ozone.om.response.key.OMKeyCreateResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
     .OMResponse;
 
 import java.util.List;
 
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_KEY_TABLE;
+
 /**
  * Response for crate file request.
  */
+@CleanupTableInfo(cleanupTables = {KEY_TABLE, OPEN_KEY_TABLE})
 public class OMFileCreateResponse extends OMKeyCreateResponse {
 
   public OMFileCreateResponse(@Nonnull OMResponse omResponse,
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java
index 2ae53591849a..86224a1a0b6f 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java
@@ -34,12 +34,13 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE;
 import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_KEY_TABLE;
 
 /**
  * Response for CreateKey request.
  */
-@CleanupTableInfo(cleanupTables = OPEN_KEY_TABLE)
+@CleanupTableInfo(cleanupTables = {OPEN_KEY_TABLE, KEY_TABLE})
 public class OMKeyCreateResponse extends OMClientResponse {
 
   public static final Logger LOG =
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/TestCleanupTableInfo.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/TestCleanupTableInfo.java
index f66e3a3e1b73..a59a4e1ac720 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/TestCleanupTableInfo.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/TestCleanupTableInfo.java
@@ -17,43 +17,132 @@
 package org.apache.hadoop.ozone.om.response;
 
+import com.google.common.base.Optional;
+import com.google.common.collect.Iterators;
+import org.apache.commons.lang3.tuple.Pair;
+import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.StorageType;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.Pipeline;
 import org.apache.hadoop.hdds.server.ServerUtils;
+import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
+import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
+import org.apache.hadoop.ozone.audit.AuditLogger;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OMMetrics;
 import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.ResolvedBucket;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
+import org.apache.hadoop.ozone.om.request.OMClientRequest;
+import org.apache.hadoop.ozone.om.request.file.OMFileCreateRequest;
+import org.apache.hadoop.ozone.om.request.key.OMKeyCreateRequest;
+import org.apache.hadoop.ozone.om.response.file.OMFileCreateResponse;
+import org.apache.hadoop.ozone.om.response.key.OMKeyCreateResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateFileRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateKeyRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyLocation;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;
 
 import org.junit.Assert;
+import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.TemporaryFolder;
+import org.junit.runner.RunWith;
+import org.mockito.Mock;
+import org.mockito.junit.MockitoJUnitRunner;
 import org.reflections.Reflections;
 
 import java.io.File;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
 import java.util.Set;
+import java.util.UUID;
+
+import static org.junit.Assert.assertEquals;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
 
 /**
- * This tests check whether {@link OMClientResponse} have defined
+ * The test checks whether all {@link OMClientResponse} have defined the
  * {@link CleanupTableInfo} annotation.
+ * For certain requests it checks whether it is properly defined not just the
+ * fact that it is defined.
  */
+@RunWith(MockitoJUnitRunner.class)
 public class TestCleanupTableInfo {
+  private static final String TEST_VOLUME_NAME = "testVol";
+  private static final String TEST_BUCKET_NAME = "testBucket";
+  private static final String TEST_KEY = "/foo/bar/baz/key";
+  private static final HddsProtos.BlockID TEST_BLOCK_ID =
+      new BlockID(1, 1).getProtobuf();
+  public static final String OM_RESPONSE_PACKAGE =
+      "org.apache.hadoop.ozone.om.response";
 
   @Rule
   public TemporaryFolder folder = new TemporaryFolder();
 
+  @Mock
+  private OMMetrics omMetrics;
+
+  @Mock
+  private OzoneManagerDoubleBufferHelper dbh;
+
+  private OzoneManager om;
+
+  /**
+   * Creates a mock Ozone Manager object.
+   * Defined behaviour in the mock:
+   * - returns the specified metrics instance
+   * - returns the specified metadataManager
+   * - resolves the bucket links to themselves (no symlinks)
+   * - disables ACLs
+   * - provides an audit logger
+   *
+   * @return the mocked Ozone Manager
+   * @throws IOException should not happen but declared in mocked methods
+   */
+  @Before
+  public void setupOzoneManagerMock()
+      throws IOException {
+    om = mock(OzoneManager.class);
+    OMMetadataManager metaMgr = createOMMetadataManagerSpy();
+    when(om.getMetrics()).thenReturn(omMetrics);
+    when(om.getMetadataManager()).thenReturn(metaMgr);
+    when(om.resolveBucketLink(any(KeyArgs.class), any(OMClientRequest.class)))
+        .thenAnswer(
+            invocationOnMock -> {
+              Pair<String, String> pair =
+                  Pair.of(TEST_VOLUME_NAME, TEST_BUCKET_NAME);
+              return new ResolvedBucket(pair, pair);
+            }
+        );
+    when(om.getAclsEnabled()).thenReturn(false);
+    when(om.getAuditLogger()).thenReturn(mock(AuditLogger.class));
+    addVolumeToMetaTable(aVolumeArgs());
+    addBucketToMetaTable(aBucketInfo());
+  }
+
   @Test
-  public void checkAnnotationAndTableName() throws Exception {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    File newFolder = folder.newFolder();
-    if (!newFolder.exists()) {
-      Assert.assertTrue(newFolder.mkdirs());
-    }
-    ServerUtils.setOzoneMetaDirPath(conf, newFolder.toString());
-    OMMetadataManager omMetadataManager = new OmMetadataManagerImpl(conf);
+  public void checkAnnotationAndTableName() {
+    OMMetadataManager omMetadataManager = om.getMetadataManager();
 
     Set<String> tables = omMetadataManager.listTableNames();
-    Reflections reflections = new Reflections(
-        "org.apache.hadoop.ozone.om.response");
-    Set<Class<? extends OMClientResponse>> subTypes =
-        reflections.getSubTypesOf(OMClientResponse.class);
+    Set<Class<? extends OMClientResponse>> subTypes = responseClasses();
     subTypes.forEach(aClass -> {
       Assert.assertTrue(aClass + "does not have annotation of" +
           " CleanupTableInfo",
@@ -66,4 +155,217 @@ public void checkAnnotationAndTableName() throws Exception {
       }
     });
   }
+
+  private Set<Class<? extends OMClientResponse>> responseClasses() {
+    Reflections reflections = new Reflections(OM_RESPONSE_PACKAGE);
+    return reflections.getSubTypesOf(OMClientResponse.class);
+  }
+
+  @Test
+  public void testFileCreateRequestSetsAllTouchedTableCachesForEviction() {
+    OMFileCreateRequest request = anOMFileCreateRequest();
+    Map<String, Integer> cacheItemCount = recordCacheItemCounts();
+
+    request.validateAndUpdateCache(om, 1, dbh);
+
+    assertCacheItemCounts(cacheItemCount, OMFileCreateResponse.class);
+    verify(omMetrics, times(1)).incNumCreateFile();
+  }
+
+  @Test
+  public void testKeyCreateRequestSetsAllTouchedTableCachesForEviction() {
+    OMKeyCreateRequest request = anOMKeyCreateRequest();
+    when(om.getEnableFileSystemPaths()).thenReturn(true);
+
+    Map<String, Integer> cacheItemCount = recordCacheItemCounts();
+
+    request.validateAndUpdateCache(om, 1, dbh);
+
+    assertCacheItemCounts(cacheItemCount, OMKeyCreateResponse.class);
+    verify(omMetrics, times(1)).incNumKeyAllocates();
+  }
+
+
+
+  private Map<String, Integer> recordCacheItemCounts() {
+    Map<String, Integer> cacheItemCount = new HashMap<>();
+    for (String tableName : om.getMetadataManager().listTableNames()) {
+      cacheItemCount.put(
+          tableName,
+          Iterators.size(
+              om.getMetadataManager().getTable(tableName).cacheIterator()
+          )
+      );
+    }
+    return cacheItemCount;
+  }
+
+  private void assertCacheItemCounts(Map<String, Integer> cacheItemCount,
+      Class<? extends OMClientResponse> responseClass
+  ) {
+    CleanupTableInfo ann = responseClass.getAnnotation(CleanupTableInfo.class);
+    List<String> cleanup = Arrays.asList(ann.cleanupTables());
+    for (String tableName : om.getMetadataManager().listTableNames()) {
+      if (!cleanup.contains(tableName)) {
+        assertEquals(
+            "Cache item count of table " +tableName,
+            cacheItemCount.get(tableName).intValue(),
+            Iterators.size(
+                om.getMetadataManager().getTable(tableName).cacheIterator()
+            )
+        );
+      }
+    }
+  }
+
+  /**
+   * Adds the volume info to the volumeTable in the MetadataManager, and also
+   * adds the value to the table's cache.
+   *
+   * @param volumeArgs the OMVolumeArgs object specifying the volume properties
+   * @throws IOException if an IO issue occurs while writing to RocksDB
+   */
+  private void addVolumeToMetaTable(OmVolumeArgs volumeArgs)
+      throws IOException {
+    String volumeKey = om.getMetadataManager().getVolumeKey(TEST_VOLUME_NAME);
+    om.getMetadataManager().getVolumeTable().put(volumeKey, volumeArgs);
+    om.getMetadataManager().getVolumeTable().addCacheEntry(
+        new CacheKey<>(volumeKey),
+        new CacheValue<>(Optional.of(volumeArgs), 2)
+    );
+  }
+
+  /**
+   * Adds the bucket info to the bucketTable in the MetadataManager, and also
+   * adds the value to the table's cache.
+   *
+   * @param bucketInfo the OMBucketInfo object specifying the bucket properties
+   * @throws IOException if an IO issue occurs while writing to RocksDB
+   */
+  private void addBucketToMetaTable(OmBucketInfo bucketInfo)
+      throws IOException {
+    String bucketKey = om.getMetadataManager()
+        .getBucketKey(bucketInfo.getVolumeName(), bucketInfo.getBucketName());
+    om.getMetadataManager().getBucketTable().put(bucketKey, bucketInfo);
+    om.getMetadataManager().getBucketTable().addCacheEntry(
+        new CacheKey<>(bucketKey),
+        new CacheValue<>(Optional.of(bucketInfo), 1)
+    );
+  }
+
+  /**
+   * Creates a spy object over an instantiated OMMetadataManager, giving the
+   * possibility to redefine behaviour. In the current implementation
+   * there isn't any behaviour which is redefined.
+   *
+   * @return the OMMetadataManager spy instance created.
+   * @throws IOException if I/O error occurs in setting up data store for the
+   *           metadata manager.
+   */
+  private OMMetadataManager createOMMetadataManagerSpy() throws IOException {
+    OzoneConfiguration conf = new OzoneConfiguration();
+    File newFolder = folder.newFolder();
+    if (!newFolder.exists()) {
+      Assert.assertTrue(newFolder.mkdirs());
+    }
+    ServerUtils.setOzoneMetaDirPath(conf, newFolder.toString());
+    return spy(new OmMetadataManagerImpl(conf));
+  }
+
+  private OMFileCreateRequest anOMFileCreateRequest() {
+    OMRequest protoRequest = mock(OMRequest.class);
+    when(protoRequest.getCreateFileRequest()).thenReturn(aCreateFileRequest());
+    when(protoRequest.getCmdType()).thenReturn(Type.CreateFile);
+    when(protoRequest.getTraceID()).thenReturn("");
+    return new OMFileCreateRequest(protoRequest);
+  }
+
+  private OMKeyCreateRequest anOMKeyCreateRequest(){
+    OMRequest protoRequest = mock(OMRequest.class);
+    when(protoRequest.getCreateKeyRequest()).thenReturn(aKeyCreateRequest());
+    when(protoRequest.getCmdType()).thenReturn(Type.CreateKey);
+    when(protoRequest.getTraceID()).thenReturn("");
+    return new OMKeyCreateRequest(protoRequest);
+  }
+
+  private OmBucketInfo aBucketInfo() {
+    return OmBucketInfo.newBuilder()
+        .setVolumeName(TEST_VOLUME_NAME)
+        .setBucketName(TEST_BUCKET_NAME)
+        .setAcls(Collections.emptyList())
+        .setIsVersionEnabled(false)
+        .setStorageType(StorageType.DEFAULT)
+        .build();
+  }
+
+  private OmVolumeArgs aVolumeArgs() {
+    return OmVolumeArgs.newBuilder()
+        .setAdminName("admin")
+        .setOwnerName("owner")
+        .setVolume(TEST_VOLUME_NAME)
+        .build();
+  }
+
+  private CreateFileRequest aCreateFileRequest() {
+    return CreateFileRequest.newBuilder()
+        .setKeyArgs(aKeyArgs())
+        .setIsRecursive(true)
+        .setIsOverwrite(false)
+        .setClientID(1L)
+        .build();
+  }
+
+  private CreateKeyRequest aKeyCreateRequest() {
+    return CreateKeyRequest.newBuilder()
+        .setKeyArgs(aKeyArgs())
+        .setClientID(1L)
+        .build();
+  }
+
+  private KeyArgs aKeyArgs() {
+    return KeyArgs.newBuilder()
+        .setVolumeName(TEST_VOLUME_NAME)
+        .setBucketName(TEST_BUCKET_NAME)
+        .setKeyName(TEST_KEY)
+        .setDataSize(512L)
+        .addKeyLocations(aKeyLocation(TEST_BLOCK_ID))
+        .addKeyLocations(aKeyLocation(TEST_BLOCK_ID))
+        .addKeyLocations(aKeyLocation(TEST_BLOCK_ID))
+        .build();
+  }
+
+  private KeyLocation aKeyLocation(
+      HddsProtos.BlockID blockID) {
+    return KeyLocation.newBuilder()
+        .setBlockID(blockID)
+        .setOffset(0)
+        .setLength(512)
+        .setCreateVersion(0)
+        .setPipeline(aPipeline())
+        .build();
+  }
+
+  private Pipeline aPipeline() {
+    return Pipeline.newBuilder()
+        .setId(aPipelineID())
+        .addMembers(aDatanodeDetailsProto("192.168.1.1", "host1"))
+        .addMembers(aDatanodeDetailsProto("192.168.1.2", "host2"))
+        .addMembers(aDatanodeDetailsProto("192.168.1.3", "host3"))
+        .build();
+  }
+
+  private DatanodeDetailsProto aDatanodeDetailsProto(String s,
+      String host1) {
+    return DatanodeDetailsProto.newBuilder()
+        .setUuid(UUID.randomUUID().toString())
+        .setIpAddress(s)
+        .setHostName(host1)
+        .build();
+  }
+
+  private HddsProtos.PipelineID aPipelineID() {
+    return HddsProtos.PipelineID.newBuilder()
+        .setId(UUID.randomUUID().toString())
+        .build();
+  }
 }