diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/DefaultDataSourceProvider.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/DefaultDataSourceProvider.java
index 42cde7d149d5..24c92c77a17b 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/DefaultDataSourceProvider.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/DefaultDataSourceProvider.java
@@ -20,6 +20,8 @@
 import javax.sql.DataSource;
 
 import org.apache.commons.lang3.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.inject.Inject;
 import com.google.inject.Provider;
@@ -30,6 +32,9 @@
  */
 public class DefaultDataSourceProvider implements Provider<DataSource> {
 
+  private static final Logger LOG =
+      LoggerFactory.getLogger(DefaultDataSourceProvider.class);
+
   @Inject
   private DataSourceConfiguration configuration;
 
@@ -43,6 +48,7 @@ public class DefaultDataSourceProvider implements Provider<DataSource> {
   @Override
   public DataSource get() {
     String jdbcUrl = configuration.getJdbcUrl();
+    LOG.info("JDBC Url for Recon : {} ", jdbcUrl);
     if (StringUtils.contains(jdbcUrl, "derby")) {
       return new DerbyDataSourceProvider(configuration).get();
     } else if (StringUtils.contains(jdbcUrl, "sqlite")) {
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/DerbyDataSourceProvider.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/DerbyDataSourceProvider.java
index 51678c011675..facb74e9fbda 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/DerbyDataSourceProvider.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/DerbyDataSourceProvider.java
@@ -47,7 +47,6 @@ public class DerbyDataSourceProvider implements Provider<DataSource> {
   @Override
   public DataSource get() {
     String jdbcUrl = configuration.getJdbcUrl();
-    LOG.info("JDBC Url for Recon : {} ", jdbcUrl);
     try {
       createNewDerbyDatabase(jdbcUrl, RECON_SCHEMA_NAME);
     } catch (Exception e) {
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/FileSizeCountTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/FileSizeCountTask.java
index 7092c548d949..e0a592ba59f3 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/FileSizeCountTask.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/FileSizeCountTask.java
@@ -96,7 +96,9 @@ public Pair<String, Boolean> reprocess(OMMetadataManager omMetadataManager) {
       return new ImmutablePair<>(getTaskName(), false);
     }
     // Truncate table before inserting new rows
-    dslContext.truncate(FILE_COUNT_BY_SIZE);
+    int execute = dslContext.delete(FILE_COUNT_BY_SIZE).execute();
+    LOG.info("Deleted {} records from {}", execute, FILE_COUNT_BY_SIZE);
+
     writeCountsToDB(true, fileSizeCountMap);
 
     LOG.info("Completed a 'reprocess' run of FileSizeCountTask.");
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestReconWithDifferentSqlDBs.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestReconWithDifferentSqlDBs.java
index 12b9659cd5fd..0e096623eb5f 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestReconWithDifferentSqlDBs.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestReconWithDifferentSqlDBs.java
@@ -20,6 +20,7 @@
 import static java.util.stream.Collectors.toList;
 import static org.apache.hadoop.ozone.recon.ReconControllerModule.ReconDaoBindingModule.RECON_DAO_LIST;
 import static org.hadoop.ozone.recon.codegen.SqlDbUtils.SQLITE_DRIVER_CLASS;
+import static org.hadoop.ozone.recon.schema.Tables.RECON_TASK_STATUS;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 
@@ -76,6 +77,10 @@ public void testSchemaSetup() throws SQLException {
     ReconTaskStatusDao dao = getDao(ReconTaskStatusDao.class);
     dao.insert(new ReconTaskStatus("TestTask", 1L, 2L));
     assertEquals(1, dao.findAll().size());
+
+    int numRows = getDslContext().delete(RECON_TASK_STATUS).execute();
+    assertEquals(1, numRows);
+    assertEquals(0, dao.findAll().size());
   }
 
   /**
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestFileSizeCountTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestFileSizeCountTask.java
index 1cfc0ad8939a..95aa52b66b4f 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestFileSizeCountTask.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestFileSizeCountTask.java
@@ -27,6 +27,7 @@
 import org.apache.hadoop.ozone.recon.tasks.OMDBUpdateEvent.OMUpdateEventBuilder;
 import org.hadoop.ozone.recon.schema.UtilizationSchemaDefinition;
 import org.hadoop.ozone.recon.schema.tables.daos.FileCountBySizeDao;
+import org.hadoop.ozone.recon.schema.tables.pojos.FileCountBySize;
 import org.jooq.DSLContext;
 import org.jooq.Record3;
 import org.junit.Before;
@@ -111,6 +112,11 @@ public void testReprocess() throws IOException {
         .thenReturn(omKeyInfo2)
         .thenReturn(omKeyInfo3);
 
+    // Reprocess could be called from table having existing entries. Adding
+    // an entry to simulate that.
+    fileCountBySizeDao.insert(
+        new FileCountBySize("vol1", "bucket1", 1024L, 10L));
+
     Pair<String, Boolean> result =
         fileSizeCountTask.reprocess(omMetadataManager);
     assertTrue(result.getRight());